/* Saves the target in parsable form to stdout. */
static void
save(const struct ipt_ip *ip, const struct ipt_entry_target *target)
{
    const struct ipt_connmark_target_info *markinfo =
        (const struct ipt_connmark_target_info *)target->data;

    switch (markinfo->mode) {
    case IPT_CONNMARK_SET:
        printf("--set-mark ");
        print_mark(markinfo->mark);
        print_mask("/", markinfo->mask);
        printf(" ");
        break;
    case IPT_CONNMARK_SAVE:
        printf("--save-mark ");
        print_mask("--mask ", markinfo->mask);
        break;
    case IPT_CONNMARK_RESTORE:
        printf("--restore-mark ");
        print_mask("--mask ", markinfo->mask);
        break;
    default:
        printf("ERROR: UNKNOWN CONNMARK MODE ");
        break;
    }
}
/* ---------------------------------------------------------------------- */
void do_block(void)
{
    int flags = 0;
    sigset_t set, old;
    int rc;
    int trace_fd = 0;
    int i;

    switch (options.sigio_cntl) {
    case OPT_SIGIO_USE_PROCMASK:
#ifdef __CYGWIN__
        printf("do_block: option not supported in CYGWIN version\n");
        exit(1);
#else
        PRINT_TIME(NOFD, &tnow, &tprev, "do_block: changing procmask");
        if (in_handler) {
            DEBG(MSG_INTR, "do_block: in_handler = 1\n");
            /* printf("do_block - in handler - sigaddset\n"); */
            assert(uc != 0);
#ifdef MACROBUG
            print_mask("do_block: uc_sigmask was \n",
                       (sigset_t *) &uc->uc_sigmask);
            TRACE(EVT_CHANGESET,
                  trace_fd = 0;
                  rc = sigaddset((sigset_t *) &(uc->uc_sigmask), SIGIO););
            print_mask("do_block: uc_sigmask is now\n",
                       (sigset_t *) &uc->uc_sigmask);
#else
            print_mask("do_block: uc_sigmask was \n", &uc->uc_sigmask);
            TRACE(EVT_CHANGESET,
                  trace_fd = 0;
                  rc = sigaddset(&(uc->uc_sigmask), SIGIO););
            print_mask("do_block: uc_sigmask is now\n", &uc->uc_sigmask);
#endif /* MACROBUG */
        } else {
/* Prints out the target info. */
static void
print(const struct ipt_ip *ip, const struct ipt_entry_target *target,
      int numeric)
{
    const struct ipt_connmark_target_info *markinfo =
        (const struct ipt_connmark_target_info *)target->data;

    switch (markinfo->mode) {
    case IPT_CONNMARK_SET:
    case IPT_CONNMARK_SET_RETURN:
        printf("CONNMARK set%s ",
               (markinfo->mode == IPT_CONNMARK_SET_RETURN) ? "-return" : "");
        // printf("CONNMARK set ");
        print_mark(markinfo->mark);
        print_mask("/", markinfo->mask);
        printf(" ");
        break;
    case IPT_CONNMARK_SAVE:
        printf("CONNMARK save ");
        print_mask("mask ", markinfo->mask);
        printf(" ");
        break;
    case IPT_CONNMARK_RESTORE:
        printf("CONNMARK restore ");
        print_mask("mask ", markinfo->mask);
        break;
    default:
        printf("ERROR: UNKNOWN CONNMARK MODE ");
        break;
    }
}
/* Prints out the target info. */
static void
CONNMARK_print(const void *ip, const struct xt_entry_target *target,
               int numeric)
{
    const struct xt_connmark_target_info *markinfo =
        (const struct xt_connmark_target_info *)target->data;

    switch (markinfo->mode) {
    case XT_CONNMARK_SET:
        printf("CONNMARK set ");
        print_mark(markinfo->mark);
        print_mask("/", markinfo->mask);
        printf(" ");
        break;
    case XT_CONNMARK_SAVE:
        printf("CONNMARK save ");
        print_mask("mask ", markinfo->mask);
        printf(" ");
        break;
    case XT_CONNMARK_RESTORE:
        printf("CONNMARK restore ");
        print_mask("mask ", markinfo->mask);
        break;
    default:
        printf("ERROR: UNKNOWN CONNMARK MODE ");
        break;
    }
}
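/*
 * The print_mark() and print_mask() helpers used by the three CONNMARK
 * routines above are not included in this collection. In the iptables
 * CONNMARK extension they are small wrappers along the following lines.
 * Treat this as a sketch that would live in the same translation unit as
 * the routines above; the exact bodies are assumed, not copied from here.
 */
static void
print_mark(unsigned long mark)
{
    printf("0x%lx", mark);            /* the mark is always printed in hex */
}

static void
print_mask(const char *text, unsigned long mask)
{
    if (mask != 0xffffffffUL)         /* omit an all-ones (no-op) mask */
        printf("%s0x%lx", text, mask);
}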
void print_masks(void)
{
    int i;

    for (i = 0; i < 4; i++) {
        print_mask(i);
    }
}
int setlogmask(int maskpri)
{
    int oldlogmask = logmask;

    logmask = maskpri;
    msgheader();
    fputs("setlogmask(", stderr);
    print_mask(maskpri, priority_masks,
               sizeof(priority_masks) / sizeof(priority_masks[0]));
    fputs(")\n", stderr);
    return oldlogmask;
}
/*
 * Handles an inotify event. Each registered application with a watch
 * that is evaluated as a match when compared to the event that is
 * getting handled here will be sent a message with the relevant data
 * from this event (it may depend on the state information kept when the
 * watch is created by the client application).
 *
 * There may also be internal actions triggered from this handler. They
 * include things that will need to occur to maintain the state of the
 * galaxy watch list, such as:
 *   - Adding a new directory
 *   - Removing an existing watch directory
 *   - Unmounting of a directory
 */
static void *
ihandler_thread(void *arg)
{
    struct inotify_event *event;
    char *dirname;
    int err;

    event = (struct inotify_event *)arg;

#ifdef DEBUG_IHANDLER_THREAD
    err_msg("DEBUG[ihandler_thread]: Inotify event handler\n");
    err_msg(" + Event watch descriptor = %d\n", event->wd);
    err_msg(" + Event mask = 0x%x\n", event->mask);
    print_mask(event->mask);
#endif

    pthread_mutex_lock(&inotify_wds_mutex);
    dirname = g_hash_table_lookup(inotify_wds, &(event->wd));
    pthread_mutex_unlock(&inotify_wds_mutex);

    /* Check for NULL'ness. Shouldn't happen. Abort if it occurs. */
    if (dirname == NULL) {
        err_msg("error[ihandler_thread]: Hash table lookup returned NULL.\n");
        return NULL;
    }

#ifdef DEBUG_IHANDLER_THREAD
    err_msg(" + dirname = %s\n", dirname);
#endif

    /* Append dirname + '/' + the event filename (if it exists). */
    char filename[strlen(dirname) + event->len + 2];
    filename[0] = '\0';
    strcat(filename, dirname);
    if (event->len) {
        if (filename[strlen(filename) - 1] != '/')
            strcat(filename, "/");
        strcat(filename, event->name);
    }

#ifdef DEBUG_IHANDLER_THREAD
    err_msg(" + filename = %s\n", filename);
#endif

    /* Handle any internal actions for this event. */
    err = handle_internal_actions(event, filename);
    if (err < 0)
        err_msg("warning[ihandler_thread]: Unable to handle internal actions.\n");

    /* Search list of galaxy watches for matching event(s). */
    find_matching_events(filename, event->mask);

    free(event);
    return NULL;
}
void omp_report_mask()
{
    int nthrds, thrd;              // Thread info
    int ncpus, nel_set;
    static int **proc_mask;
    int i, j, ierr;
    char *dummy;

    thrd   = omp_get_thread_num();
    nthrds = omp_get_num_threads();
    ncpus  = (int) sysconf(_SC_NPROCESSORS_ONLN);

    if (omp_get_num_procs() != ncpus) {
        printf("ERROR: ncpus_by_omp=%d, ncpus_sched=%d\n",
               omp_get_num_procs(), ncpus);
        exit(1);
    }

#pragma omp single
    {
        proc_mask = malloc(sizeof(int*) * nthrds);
        for (i = 0; i < nthrds; i++)
            proc_mask[i] = malloc(sizeof(int) * ncpus);
        for (i = 0; i < nthrds; i++)
            for (j = 0; j < ncpus; j++)
                proc_mask[i][j] = 0;
    }

    ierr = boundto(&nel_set, proc_mask[thrd]);

#pragma omp barrier
#pragma omp single
    {
        print_mask(1, dummy, 0, 0, 0, ncpus, nthrds, 1, proc_mask[thrd]);  // print header
        for (thrd = 0; thrd < nthrds; thrd++) {
            print_mask(0, dummy, 0, thrd, 0, ncpus, nthrds, 1, proc_mask[thrd]);
        }
    }
}
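/*
 * omp_report_mask() collects one affinity mask per OpenMP thread, so it has
 * to be invoked from inside a parallel region. A minimal, assumed usage
 * sketch (the boundto() helper that fills each mask is expected to wrap
 * something like sched_getaffinity()):
 */
#include <omp.h>

void omp_report_mask(void);   /* from the snippet above */

int main(void)
{
    #pragma omp parallel
    omp_report_mask();        /* every thread reports; one table is printed */
    return 0;
}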
void openlog(const char *ident, int logopt, int facility)
{
    msgheader();
    fprintf(stderr, "openlog(%s, ", ident ? ident : "<none>");
    print_mask(logopt, logopts, sizeof(logopts) / sizeof(logopts[0]));

    const char *facility_str = get_facility_str(facility);
    if (facility_str) {
        fprintf(stderr, ", %s)\n", facility_str);
    } else {
        fprintf(stderr, ", 0x%02X)\n", facility);
    }
}
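/*
 * setlogmask() and openlog() above pass a value plus a name/value table
 * (priority_masks, logopts) to print_mask(). The table type is not shown in
 * these snippets; assuming an entry like { int value; const char *name; },
 * a sketch of such a helper, living alongside the mocks above, could look
 * like this (names here are hypothetical):
 */
#include <stdio.h>

struct mask_name {
    int value;
    const char *name;
};

static void print_mask(int mask, const struct mask_name *table, size_t n)
{
    size_t i;
    int first = 1;

    for (i = 0; i < n; i++) {
        if (mask & table[i].value) {        /* print every flag that is set */
            fprintf(stderr, "%s%s", first ? "" : "|", table[i].name);
            first = 0;
        }
    }
    if (first)
        fprintf(stderr, "0");               /* no flags set at all */
}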
/**
 * @brief
 *      Returns the access info.
 *
 * @param[in]   acc - pointer to access info
 * @param[in]   len - length of info
 *
 * @return      string
 * @retval      access info     success
 * @retval      empty string    error
 */
char *
accessinfo_values(struct accessinfo *acc, int len)
{
    int i;
    static char buf[512];
    static char msg[LOG_BUF_SIZE];

    strcpy(msg, "");
    for (i = 0; i < len; i++) {
        if (acc[i].group != NULL) {
            sprintf(buf, "acc[%d]=(grp=%s,mask=%s) ", i,
                    (acc[i].group ? acc[i].group : "null"),
                    print_mask(acc[i].mask));
            if ((strlen(msg) + strlen(buf)) < 4095)
                strcat(msg, buf);
        }
    }
    return (msg);
}
static void *
receive_notifications(void *arg)
{
    struct galaxy_t *galaxy;
    struct galaxy_event_t *gevent;

    galaxy = (struct galaxy_t *)arg;

    while (1) {
        err_msg("Receiving galaxy event...");
        gevent = galaxy_receive(galaxy);
        if (gevent == NULL) {
            /* Don't dereference a NULL event; wait for the next one. */
            err_msg("warning[receive_notifications]: gevent is NULL!");
            continue;
        }
        err_msg("gevent->mask = %d gevent->name = %s gevent->timestamp = %d\n",
                gevent->mask, gevent->name, gevent->timestamp);
        print_mask(gevent->mask);
    }

    return NULL;
}
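/*
 * ihandler_thread() and receive_notifications() above call a one-argument
 * print_mask() to dump an inotify-style event mask for debugging. Its body
 * is not part of this collection; a minimal sketch, assuming the standard
 * IN_* bits from <sys/inotify.h> and printing to stderr, might look like:
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/inotify.h>

static void print_mask(uint32_t mask)
{
    static const struct { uint32_t bit; const char *name; } bits[] = {
        { IN_ACCESS, "IN_ACCESS" },       { IN_ATTRIB, "IN_ATTRIB" },
        { IN_CREATE, "IN_CREATE" },       { IN_DELETE, "IN_DELETE" },
        { IN_MODIFY, "IN_MODIFY" },       { IN_MOVED_FROM, "IN_MOVED_FROM" },
        { IN_MOVED_TO, "IN_MOVED_TO" },   { IN_CLOSE_WRITE, "IN_CLOSE_WRITE" },
        { IN_ISDIR, "IN_ISDIR" },         { IN_UNMOUNT, "IN_UNMOUNT" },
    };
    size_t i;

    for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
        if (mask & bits[i].bit)
            fprintf(stderr, " %s", bits[i].name);   /* one name per set bit */
    fprintf(stderr, "\n");
}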
int hybrid_report_mask()
{
    // General
    int i, j, ierr;
    int id, rid, tid;
    int in_mpi, in_omp;
    int thrd, nthrds;
    int ncpus, nel_set;

    // Mask storage
    static int **omp_proc_mask;
    static int *omp_mask_pac;
    char *dummy;

    // MPI specific variables
    int rank, nranks;
    MPI_Request *request;
    MPI_Status *status;
    static int multi_node = 0;
    static char *all_names;
    static int max_name_len;
    int name_len;
    char proc_name[MPI_MAX_PROCESSOR_NAME];

    char l, p;
    int tpc;                     // hwthreads/core

    Maskopts opts;
    // get print_speed fast or slow (f|c); listing cores or SMT (c|s)
    p = opts.get_p();
    l = opts.get_l();

    tpc = get_threads_per_node();

    // In MPI and parallel region ?
    MPI_Initialized(&in_mpi);
    in_omp = omp_in_parallel();

    if (in_mpi == 0) {
        printf("ERROR: ***** Must call hybrid_report_mask() in MPI program. ***** \n");
        exit(1);
    }

    // Get rank number & no of ranks via MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    if (in_omp == 0) {
        if (rank == 0) {
            printf(" ***** When using 1 thread, Intel OpenMP MAY report "
                   "\"not in a parallel region\" (Uh!)***** \n");
            printf(" ***** Each row will only have a rank number (no \"0\" thread_id). \n");
            printf("WARNING: ***** Unspecified results if hybrid_report_mask "
                   "not called in parallel region of MPI code section. ***** \n");
        }
    }

    thrd   = omp_get_thread_num();     // thread id
    nthrds = omp_get_num_threads();    // number of threads

    // Get number of cpus (this gives no. of cpu_ids in /proc/cpuinfo)
    ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);

    // Working only with MPI processes (masters)
#pragma omp master
    {
        // Get a list of nodes from all ranks.
        MPI_Get_processor_name(proc_name, &name_len);
        MPI_Allreduce(&name_len, &max_name_len, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        all_names = (char *) malloc(sizeof(int*) * nranks * (max_name_len + 1));
        MPI_Gather(proc_name, max_name_len + 1, MPI_CHAR,
                   all_names, max_name_len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);

        // If multiple nodes, make multi_node non-zero.
        if (rank == 0) {
            for (id = 0; id < nranks; id++) {
                if (strcmp(&all_names[id * (max_name_len + 1)], &all_names[0]))
                    multi_node++;
            }
        }

        // Create shared storage for masks (only master allocates)
        omp_proc_mask = (int **) malloc(sizeof(int*) * nthrds);
        for (i = 0; i < nthrds; i++)
            omp_proc_mask[i] = (int *) malloc(sizeof(int) * ncpus);
        for (i = 0; i < nthrds; i++)
            for (j = 0; j < ncpus; j++)
                omp_proc_mask[i][j] = 0;
    }

#pragma omp barrier
#pragma omp critical              // (boundto -- may not be thread safe)
    ierr = boundto(&nel_set, omp_proc_mask[thrd]);
#pragma omp barrier

#pragma omp master
    {
        omp_mask_pac = (int *) malloc(sizeof(int) * nranks * nthrds * ncpus);  // need packing space for mpi send/recv

        if (rank == 0) {
            request = (MPI_Request *) malloc(sizeof(MPI_Request) * nranks);
            status  = (MPI_Status *)  malloc(sizeof(MPI_Status)  * nranks);

            print_mask(1, dummy, multi_node, 0, 0, ncpus, nranks, nthrds,
                       omp_proc_mask[0], tpc, l);            // print header
            fflush(stdout);

            for (tid = 0; tid < nthrds; tid++) {
                print_mask(0, &all_names[tid * (max_name_len + 1)], multi_node,
                           0, tid, ncpus, nranks, nthrds,
                           omp_proc_mask[tid], tpc, l);
            }
            fflush(stdout);

            // Receive other ranks' packed mask arrays
            for (rid = 1; rid < nranks; rid++) {
                MPI_Irecv(&omp_mask_pac[rid * nthrds * ncpus], nthrds * ncpus,
                          MPI_INT, rid, 99, MPI_COMM_WORLD, &request[rid - 1]);
            }
            MPI_Waitall(nranks - 1, &request[0], &status[0]);

            // Print for each rank
            for (rid = 1; rid < nranks; rid++) {
                for (tid = 0; tid < nthrds; tid++) {
                    print_mask(0, &all_names[tid * (max_name_len + 1)], multi_node,
                               rid, tid, ncpus, nranks, nthrds,
                               &omp_mask_pac[rid * nthrds * ncpus + tid * ncpus], tpc, l);
                    if (p == 's') ierr = usleep(300000);
                }
            }

            if (nranks * nthrds > 50)
                print_mask(2, dummy, multi_node, 0, 0, ncpus, nranks, nthrds,
                           omp_proc_mask[0], tpc, l);        // print header
            fflush(stdout);
        }        // end root printing
        else {   // all non-root ranks
            // Pack up the rank's mask arrays (Uh, should have made one array from beginning!)
            for (tid = 0; tid < nthrds; tid++) {
                for (id = 0; id < ncpus; id++)
                    omp_mask_pac[(tid * ncpus) + id] = omp_proc_mask[tid][id];
                if (p == 's') ierr = usleep(300000);
            }
            // Send to root
            MPI_Send(omp_mask_pac, nthrds * ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD);
        }        // end non-root printing

        // Return allocated space
        for (i = 0; i < nthrds; i++) free(omp_proc_mask[i]);
        free(omp_proc_mask);
        free(omp_mask_pac);
        if (rank == 0) { free(request); free(status); }
        free(all_names);
    }   // end of master

#pragma omp barrier   // JIC, so that all threads leave at the same time.

    return 0;
}
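/*
 * The hybrid_report_mask() above is meant to be called from inside an OpenMP
 * parallel region of an MPI program, once per thread (the second variant
 * further below additionally expects a first call outside the parallel
 * region). A minimal, assumed usage sketch:
 */
#include <mpi.h>
#include <omp.h>

int hybrid_report_mask();     /* from the snippet above */

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    #pragma omp parallel
    hybrid_report_mask();     /* every thread contributes its affinity mask */

    MPI_Finalize();
    return 0;
}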
struct constraint *
sec_flow_separator (
    struct comp **      comp_hookp,  /* IN/OUT - congested component(s) */
    double *            x,           /* IN - the LP solution to separate */
    bitmap_t *          edge_mask,   /* IN - subset of valid edges */
    struct bbinfo *     bbip,        /* IN - branch-and-bound info */
    struct constraint * cp           /* IN - existing constraints */
)
{
    int                 i;
    int                 j;
    int                 t;
    int                 kmasks;
    struct comp *       comp;
    struct cinfo *      cip;
    int *               ip1;
    int *               ip2;
    double              z;
    bitmap_t *          S;
    bitmap_t *          RS;

    cip = bbip -> cip;

#if 0
    plot_lp_solution ("LP solution to separate", x, cip, BIG_PLOT);
#endif

    S  = NEWA (2 * cip -> num_vert_masks, bitmap_t);
    RS = S + cip -> num_vert_masks;

    for (;;) {
        comp = *comp_hookp;
        if (comp EQ NULL) break;

        if (comp -> num_verts <= 0) {
            /* Free up this component... */
            *comp_hookp = comp -> next;
            comp -> next = NULL;
            free_congested_component (comp);
            continue;
        }
        if (comp -> num_verts EQ 1) {
            /* Because of the total-degree constraint, this */
            /* cannot happen unless something is very wrong! */
            fatal ("sec_flow_separator: Bug 1.");
        }
        if (comp -> num_verts <= SEC_ENUM_LIMIT) {
            /* We have whittled this component down to */
            /* something small.  Use brute force on the rest... */
            cp = enumerate_all_subtours (comp, cp, bbip);

            /* Free up this component... */
            *comp_hookp = comp -> next;
            comp -> next = NULL;
            free_congested_component (comp);
            continue;
        }

        /* Find the LEAST congested vertex.  This is the one */
        /* we are going to try to force into the solution, */
        /* since that is the one we would like to delete from */
        /* the set afterward... */
        t = find_least_congested_vertex (NULL, comp);

#if 0
        tracef (" %% -------------------------"
                "-------------------------\n"
                " %% separating comp with %d verts, %d edges,"
                " forcing vertex %d\n"
                " %% -------------------------"
                "-------------------------\n",
                comp -> num_verts, comp -> num_edges,
                comp -> rverts [t] [0]);
#endif

        /* Find worst SEC violation involving vertex t. */
        z = do_flow_problem (comp, t, S);

#if 0
#if 0
        kmasks = cip -> num_vert_masks;
        for (i = 0; i < kmasks; i++) {
            RS [i] = 0;
        }
        for (i = 0; i < comp -> num_verts; i++) {
            if (NOT BITON (S, i)) continue;
            ip1 = comp -> rverts [i];
            ip2 = comp -> rverts [i + 1];
            while (ip1 < ip2) {
                j = *ip1++;
                SETBIT (RS, j);
            }
        }
        print_mask (" %% S =", RS, cip -> num_verts);
#else
        print_mask (" %% S =", S, comp -> num_verts);
#endif
        tracef (" %% f(S) = %-24.15g\n", z);
#endif

        if (z < (1.0 - FUZZ)) {
            /* Add new violated constraint to the list... */
            cp = check_component_subtour (S, comp, cp, x, edge_mask, bbip);
        }

        /* We have found the worst violation (if any) involving */
        /* vertex t.  We can now eliminate t from further */
        /* consideration... */
        *comp_hookp = delete_vertex_from_component (t, comp);
    }

    free ((char *) S);

    return (cp);
}
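/*
 * The print_mask() referenced in the disabled debug block above takes a
 * label, a bitmap_t array and a vertex count. Its definition is not part of
 * this collection; a plausible sketch, assuming the BITON(bitmap, index)
 * test macro used above and the tracef() output routine, is:
 */
static void
print_mask (const char *msg, bitmap_t *bp, int n)
{
    int i;

    tracef ("%s", msg);
    for (i = 0; i < n; i++) {
        if (BITON (bp, i))
            tracef (" %d", i);     /* list the indices of all set bits */
    }
    tracef ("\n");
}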
void print_eglConfig (EGLDisplay dpy, EGLConfig config)
{
    EGLint val;

    printf ("# EGLconfig:\n");

    print_eInt (EGL_BUFFER_SIZE);
    print_eInt (EGL_RED_SIZE);
    print_eInt (EGL_GREEN_SIZE);
    print_eInt (EGL_BLUE_SIZE);
    print_eInt (EGL_LUMINANCE_SIZE);
    print_eInt (EGL_ALPHA_SIZE);
    print_eInt (EGL_ALPHA_MASK_SIZE);

    print_eBool (EGL_BIND_TO_TEXTURE_RGB);
    print_eBool (EGL_BIND_TO_TEXTURE_RGBA);

    print_enum_Head (EGL_COLOR_BUFFER_TYPE);
    print_enum (EGL_RGB_BUFFER);
    print_enum (EGL_LUMINANCE_BUFFER);
    print_enum_Foot ();

    print_enum_Head (EGL_CONFIG_CAVEAT);
    print_enum (EGL_NONE);
    print_enum (EGL_SLOW_CONFIG);
    print_enum (EGL_NON_CONFORMANT_CONFIG);
    print_enum_Foot ();

    print_eInt (EGL_CONFIG_ID);

    print_mask_Head (EGL_CONFORMANT);
    print_mask (EGL_OPENGL_ES_BIT);
    print_mask (EGL_OPENGL_ES2_BIT);
    print_mask (EGL_OPENVG_BIT);
    print_mask (EGL_OPENGL_BIT);
    print_mask_Foot ();

    print_eInt (EGL_DEPTH_SIZE);
    print_eInt (EGL_LEVEL);
    print_eInt (EGL_MATCH_NATIVE_PIXMAP);
    print_eInt (EGL_MAX_PBUFFER_WIDTH);
    print_eInt (EGL_MAX_PBUFFER_HEIGHT);
    print_eInt (EGL_MAX_PBUFFER_PIXELS);
    print_eInt (EGL_MAX_SWAP_INTERVAL);
    print_eInt (EGL_MIN_SWAP_INTERVAL);

    print_eBool (EGL_NATIVE_RENDERABLE);
    print_eInt (EGL_NATIVE_VISUAL_ID);
    print_eInt (EGL_NATIVE_VISUAL_TYPE);

    print_mask_Head (EGL_RENDERABLE_TYPE);
    print_mask (EGL_OPENGL_ES_BIT);
    print_mask (EGL_OPENGL_ES2_BIT);
    print_mask (EGL_OPENVG_BIT);
    print_mask (EGL_OPENGL_BIT);
    print_mask_Foot ();

    print_eInt (EGL_SAMPLE_BUFFERS);
    print_eInt (EGL_SAMPLES);
    print_eInt (EGL_STENCIL_SIZE);

    print_mask_Head (EGL_SURFACE_TYPE);
    print_mask (EGL_PBUFFER_BIT);
    print_mask (EGL_PIXMAP_BIT);
    print_mask (EGL_WINDOW_BIT);
    print_mask (EGL_VG_COLORSPACE_LINEAR_BIT);
    print_mask (EGL_VG_ALPHA_FORMAT_PRE_BIT);
    print_mask (EGL_MULTISAMPLE_RESOLVE_BOX_BIT);
    print_mask (EGL_SWAP_BEHAVIOR_PRESERVED_BIT);
    print_mask_Foot ();

    print_mask_Head (EGL_TRANSPARENT_TYPE);
    print_mask (EGL_TRANSPARENT_RGB);
    print_mask_Foot ();

    print_eInt (EGL_TRANSPARENT_RED_VALUE);
    print_eInt (EGL_TRANSPARENT_GREEN_VALUE);
    print_eInt (EGL_TRANSPARENT_BLUE_VALUE);

    printf ("# END\n");
}
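/*
 * print_eglConfig() above relies on print_eInt / print_eBool / print_enum* /
 * print_mask* helpers that are not included here. They presumably query the
 * attribute with eglGetConfigAttrib() into the local `val` and print it.
 * A hedged sketch of what such macros might look like (names and formatting
 * are assumed; the macros expand in the scope of dpy/config/val above):
 */
#define print_eInt(A)                                           \
    do { eglGetConfigAttrib (dpy, config, (A), &val);           \
         printf ("%-32s %d\n", #A, (int) val); } while (0)

#define print_eBool(A)                                          \
    do { eglGetConfigAttrib (dpy, config, (A), &val);           \
         printf ("%-32s %s\n", #A,                              \
                 val ? "EGL_TRUE" : "EGL_FALSE"); } while (0)

#define print_enum_Head(A)                                      \
    do { eglGetConfigAttrib (dpy, config, (A), &val);           \
         printf ("%-32s", #A); } while (0)

#define print_enum(E)                                           \
    do { if (val == (E)) printf (" %s", #E); } while (0)

#define print_enum_Foot()   printf ("\n")

#define print_mask_Head(A)  print_enum_Head (A)

#define print_mask(B)                                           \
    do { if (val & (B)) printf (" %s", #B); } while (0)

#define print_mask_Foot()   printf ("\n")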
int hybrid_report_mask(void)
{
    int thrd, nthrds;
    int rank, nranks;
    static int multi_node = 0;
    int ncpus, nel_set;
    static int **omp_proc_mask;
    static int *omp_mask_pac;
    char *dummy;
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    static char *all_names;
    int name_len;
    static int max_name_len;

    // General
    int i, j, ierr;
    int id, rid, tid;
    int in_mpi, in_omp;

    // Mask storage
    int **proc_mask;
    static int *all_masks = 0;

    MPI_Initialized(&in_mpi);
    in_omp = omp_in_parallel();

    if (in_mpi != 0 && in_omp == 0) {

        // Get number of cpus (this gives no. of cpu_ids in /proc/cpuinfo)
        // Get rank number & no of ranks via MPI
        ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &nranks);

        // Create a 2-D array for the mask:
        //   proc_mask[rank][ncpus] -- for simplicity, size is [ncpus][ncpus]
        //   (thinking ahead for hybrid code); zero out proc_mask[ncpus][ncpus].
        // proc_mask could have been a single array (proc_mask[ncpus]); it is a
        // hold-over from the OpenMP version that holds everything for all threads.
        // For MPI a contiguous collection array (all_masks) is used instead.
        proc_mask = malloc(sizeof(int*) * ncpus);
        for (i = 0; i < ncpus; i++)
            proc_mask[i] = malloc(sizeof(int) * ncpus);
        for (i = 0; i < ncpus; i++)
            for (j = 0; j < ncpus; j++)
                proc_mask[i][j] = 0;

        all_masks = (int *) malloc(sizeof(int) * ncpus * ncpus);

        // get map for this processor
        ierr = boundto(&nel_set, proc_mask[rank]);

        // Gather information to rank 0
        MPI_Gather(proc_mask[rank], ncpus, MPI_INT,
                   all_masks, ncpus, MPI_INT, 0, MPI_COMM_WORLD);

        // Get a list of nodes from all ranks.
        MPI_Get_processor_name(proc_name, &name_len);
        MPI_Allreduce(&name_len, &max_name_len, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
        all_names = malloc(sizeof(int*) * nranks * (max_name_len + 1));
        MPI_Gather(proc_name, max_name_len + 1, MPI_CHAR,
                   all_names, max_name_len + 1, MPI_CHAR, 0, MPI_COMM_WORLD);

        // If multiple nodes, make multi_node not equal to 0.
        if (rank == 0)
            for (id = 0; id < nranks; id++) {
                if (strcmp(&all_names[id * (max_name_len + 1)], &all_names[0]))
                    multi_node++;
            }
    }  // End of pure MPI part

    if (in_mpi != 0 && in_omp != 0) {

        if (all_masks == 0) {
            printf("ERROR: ***** You must call hybrid_report_mask() in a Pure MPI region first. ***** \n");
            exit(1);
        }

        thrd   = omp_get_thread_num();
        nthrds = omp_get_num_threads();
        ncpus  = (int) sysconf(_SC_NPROCESSORS_ONLN);

#pragma omp single
        {
            omp_proc_mask = malloc(sizeof(int*) * nthrds);
            for (i = 0; i < nthrds; i++)
                omp_proc_mask[i] = malloc(sizeof(int) * ncpus);
            for (i = 0; i < nthrds; i++)
                for (j = 0; j < ncpus; j++)
                    omp_proc_mask[i][j] = 0;
        }

#pragma omp critical
        ierr = boundto(&nel_set, omp_proc_mask[thrd]);
#pragma omp barrier

        MPI_Comm_size(MPI_COMM_WORLD, &nranks);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

#pragma omp master
        {
            omp_mask_pac = (int *) malloc(sizeof(int) * nranks * ncpus);  // need packing space for mpi send/recv

            if (rank == 0) {
                print_mask(1, dummy, multi_node, 0, 0, ncpus, nthrds, nranks,
                           omp_proc_mask[0]);          // print header
                fflush(stdout);

                for (tid = 0; tid < nthrds; tid++) {
                    print_mask(0, &all_names[tid * (max_name_len + 1)], multi_node,
                               0, tid, ncpus, nthrds, nranks, omp_proc_mask[tid]);
                }
                fflush(stdout);

                // Receive other ranks' packed mask arrays
                for (rid = 1; rid < nranks; rid++) {
                    MPI_Recv(omp_mask_pac, nthrds * ncpus, MPI_INT, rid, 99,
                             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
                    for (tid = 0; tid < nthrds; tid++) {
                        print_mask(0, &all_names[rid * (max_name_len + 1)], multi_node,
                                   rid, tid, ncpus, nthrds, nranks,
                                   &omp_mask_pac[tid * ncpus]);
                    }
                    fflush(stdout);
                }  // rank loop
            }      // end root printing
            else {  // all non-root ranks send to root
                // Pack up this rank's mask arrays
                // (Uh, should have made one array from beginning!)
                for (tid = 0; tid < nthrds; tid++) {
                    for (id = 0; id < ncpus; id++)
                        omp_mask_pac[(tid * ncpus) + id] = omp_proc_mask[tid][id];
                }
                // Send to root (once; root posts exactly one receive per rank)
                MPI_Send(omp_mask_pac, nthrds * ncpus, MPI_INT, 0, 99, MPI_COMM_WORLD);
            }  // end non-root printing

            MPI_Barrier(MPI_COMM_WORLD);
        }  // end of master
#pragma omp barrier
    }  // end of OpenMP part

    return 0;
}
int set_watch_backend_inotify(struct notifywatch_struct *watch)
{
    int wd = 0;
    uint32_t inotify_mask;
    unsigned int error = 0;

    logoutput("set_watch_backend_inotify");

    /* first translate the fsnotify mask into an inotify mask */
    inotify_mask = translate_mask_fsnotify_to_inotify(watch->mask);

    if (inotify_mask > 0) {
        char maskstring[128];

        print_mask(inotify_mask, maskstring, 128);
        logoutput("set_watch_backend_inotify: call inotify_add_watch on path %s and mask %i/%s",
                  watch->pathinfo.path, inotify_mask, maskstring);

        /* add some sane flags and all events: */
        inotify_mask |= IN_DONT_FOLLOW | IN_ALL_EVENTS;

#ifdef IN_EXCL_UNLINK
        inotify_mask |= IN_EXCL_UNLINK;
#endif

        wd = inotify_add_watch(xdata_inotify.fd, watch->pathinfo.path, inotify_mask);

        if (wd == -1) {
            error = errno;
            logoutput("set_watch_backend_inotify: setting inotify watch on %s gives error: %i (%s)",
                      watch->pathinfo.path, error, strerror(error));
        } else {
            struct inotify_watch_struct *inotify_watch = NULL;

            inotify_watch = lookup_inotify_watch_wd(wd);

            if (inotify_watch) {
                if (!(inotify_watch->watch == watch)) {
                    logoutput("set_watch_backend_inotify: internal error, inotify watch (wd=%i)(path=%s)",
                              wd, watch->pathinfo.path);
                    inotify_watch->watch = watch;
                    watch->backend = (void *) inotify_watch;
                }
            } else {
                inotify_watch = malloc(sizeof(struct inotify_watch_struct));
                if (inotify_watch) {
                    inotify_watch->wd = wd;
                    inotify_watch->watch = watch;
                    add_watch_inotifytable(inotify_watch);
                    watch->backend = (void *) inotify_watch;
                }
            }
        }
    } else if (inotify_mask == 0) {
        logoutput("set_watch_backend_inotify: mask %i", inotify_mask);
    } else {
        logoutput("set_watch_backend_inotify: mask %i", inotify_mask);
    }

out:
    return (error > 0) ? error : wd;
}
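/*
 * set_watch_backend_inotify() above expects a print_mask(mask, buffer, size)
 * variant that renders the inotify mask into a caller-supplied string rather
 * than printing it. A minimal sketch under that assumption (the flag list is
 * abbreviated; the real helper may cover more IN_* bits):
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/inotify.h>

static void print_mask(uint32_t mask, char *buffer, size_t size)
{
    static const struct { uint32_t bit; const char *name; } bits[] = {
        { IN_CREATE, "IN_CREATE" },         { IN_DELETE, "IN_DELETE" },
        { IN_MODIFY, "IN_MODIFY" },         { IN_ATTRIB, "IN_ATTRIB" },
        { IN_MOVED_FROM, "IN_MOVED_FROM" }, { IN_MOVED_TO, "IN_MOVED_TO" },
    };
    size_t i, len = 0;

    buffer[0] = '\0';
    for (i = 0; i < sizeof(bits) / sizeof(bits[0]) && len < size; i++) {
        if (mask & bits[i].bit)
            len += snprintf(buffer + len, size - len, "%s%s",
                            len ? ";" : "", bits[i].name);   /* append name */
    }
}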