/*
 * Build the global debug-log file name (written into `debugfile`) from `file`.
 *
 * If `file` already contains a directory component (has_path), the log is
 * placed next to it: "<file-without-suffix>.log".  Otherwise the directory
 * is taken from the LOG (or log) environment variable; when neither is set,
 * a single space is used as the prefix (preserved from the original code).
 *
 * NOTE(review): relies on externals `has_path`, `remove_suffix` and the
 * global buffer `debugfile` declared elsewhere in this project.
 */
void set_logfile(const char *file) {
    char *path;
    char _path[1024] = {};
    char tmp[1024] = {};
    int v_haspath = has_path(file);

    if (!v_haspath) {
        if ((path = getenv("LOG")) == NULL && (path = getenv("log")) == NULL) {
            memcpy(_path, " ", 1);
        } else {
            size_t len = strlen(path);
            /* Bound the copy: leave room for a trailing '/' and the NUL. */
            if (len > sizeof(_path) - 2)
                len = sizeof(_path) - 2;
            memcpy(_path, path, len);
            /* BUG FIX: the original condition used `||`, which is always
             * true (a char cannot equal both separators), so a '/' was
             * appended even when the directory already ended with one.
             * `&&` appends only when neither separator is present.  The
             * len == 0 guard also avoids the original's out-of-bounds
             * read of _path[-1] for an empty environment value. */
            if (len == 0 || (_path[len - 1] != '/' && _path[len - 1] != '\\')) {
                _path[len] = '/';
                _path[len + 1] = 0;
            }
        }
    }

    memset(debugfile, 0, sizeof(debugfile));
    remove_suffix(file, tmp);
    /* snprintf instead of sprintf: never overflow debugfile. */
    if (v_haspath)
        snprintf(debugfile, sizeof(debugfile), "%s.log", tmp);
    else
        snprintf(debugfile, sizeof(debugfile), "%s/%s.log", _path, tmp);
}
/// Accessor for the model's filesystem path.
/// @return reference to the stored path
/// @throws std::logic_error (via BOOST_THROW_EXCEPTION) when no path is set
const boost::filesystem::path& path() const
{
    if (has_path())
        return *m_path;
    BOOST_THROW_EXCEPTION(std::logic_error("This model does not have a path."));
}
//---------------------------------------------------------------------------// bool Schema::compatible(const Schema &s) const { index_t dt_id = m_dtype.id(); index_t s_dt_id = s.dtype().id(); if(dt_id != s_dt_id) return false; bool res = true; if(dt_id == DataType::OBJECT_ID) { // each of s's entries that match paths must have dtypes that match std::map<std::string, index_t>::const_iterator itr; for(itr = s.object_map().begin(); itr != s.object_map().end() && res; itr++) { // make sure we actually have the path if(has_path(itr->first)) { // use index to fetch the child from the other schema const Schema &s_chld = s.child(itr->second); // fetch our child by name const Schema &chld = fetch_child(itr->first); // do compat check res = chld.compatible(s_chld); } } } else if(dt_id == DataType::LIST_ID) { // each of s's entries dtypes must match index_t s_n_chd = s.number_of_children(); // can't be compatible in this case if(number_of_children() < s_n_chd) return false; const std::vector<Schema*> &s_lst = s.children(); const std::vector<Schema*> &lst = children(); for(index_t i = 0; i < s_n_chd && res; i++) { res = lst[i]->compatible(*s_lst[i]); } } else { res = m_dtype.compatible(s.dtype()); } return res; }
/*
 * Percolation statistics driver.
 *
 * For square grids of growing side length (10, 100, ... up to MAX_SIZE),
 * sweep the fill probability from 0 toward 1 in steps of `step` and, for
 * each probability, run N_STAT random fills counting how many of them
 * contain a spanning path.  Results are printed as
 * "<probability> <fraction-with-path> <count>" with a blank separator
 * between grid sizes (gnuplot-friendly).
 *
 * argv[1] optionally overrides the probability step; a value that parses
 * to 0 triggers the usage message.
 */
int main(int argc, char** argv)
{
    float step;
    int step_idx, trial, grid, hits;
    int *cells;

    if (argc == 1) {
        step = DEFAULT_P_STEP;
    } else {
        step = strtof(argv[1], NULL);
        if (step == 0.0) {
            fprintf(stderr,
                    "Usage:\n\t%s [p]\n\np: Probability (default: %g)\n",
                    argv[0], DEFAULT_P_STEP);
            exit(1);
        }
    }

    for (grid = 10; grid <= MAX_SIZE; grid *= 10) {
        cells = malloc(grid * grid * sizeof(int));
        if (cells == NULL) {
            fprintf(stderr, "Memory Error");
            exit(1);
        }
        for (step_idx = 0; step_idx < 1.0 / step; step_idx++) {
            hits = 0;
            for (trial = 0; trial < N_STAT; trial++) {
                /* fresh random grid at this probability, then test it */
                fill_array(cells, grid, step_idx * step);
                hits += has_path(cells, grid);
            }
            printf("%f %g %d\n", step_idx * step,
                   (float)hits / N_STAT, hits);
        }
        printf("\n\n");
        free(cells);
    }
    return 0;
}
//---------------------------------------------------------------------------// Schema & Schema::fetch(const std::string &path) { // fetch w/ path forces OBJECT_ID init_object(); std::string p_curr; std::string p_next; utils::split_path(path,p_curr,p_next); // handle parent // check for parent if(p_curr == "..") { if(m_parent != NULL) // TODO: check for error (no parent) return m_parent->fetch(p_next); } if (!has_path(p_curr)) { Schema* my_schema = new Schema(); my_schema->m_parent = this; children().push_back(my_schema); object_map()[p_curr] = children().size() - 1; object_order().push_back(p_curr); } index_t idx = child_index(p_curr); if(p_next.empty()) { return *children()[idx]; } else { return children()[idx]->fetch(p_next); } }
/*
 * Recursive augmenting-path search (Ford-Fulkerson style DFS) from node u
 * to the global sink T.  Returns 1 when a path with positive residual
 * capacity was found, 0 otherwise.
 *
 * Globals used (declared elsewhere): graph/gr = adjacency lists and their
 * sizes, flow = residual capacities, cr = "may visit" flags, curFlow = the
 * bottleneck capacity accumulated so far, T = sink index.  On success the
 * residual capacities along the path are updated by curFlow.
 */
char has_path( int u ){
    int v, w, oldFlow;
    if( u == T )
        return 1;                    /* reached the sink */
    cr[u] = 0;                       /* mark u as on the current path */
    for( w = 0; w < gr[u]; w++ ){
        v = graph[u][w];
        /* follow only edges with residual capacity to unvisited nodes */
        if( flow[u][v] && cr[v] ){
            oldFlow = curFlow;       /* remember bottleneck before tightening */
            if( flow[u][v] < curFlow )
                curFlow = flow[u][v];
            if( has_path( v ) ){
                /* push the bottleneck: shrink forward, grow reverse edge */
                flow[u][v] -= curFlow;
                flow[v][u] += curFlow;
                cr[u] = 1;
                return 1;
            }
            else curFlow = oldFlow;  /* dead end: restore the bottleneck */
        }
    }
    cr[u] = 1;                       /* unmark so later searches may revisit */
    return 0;
}
//---------------------------------------------------------------------------// bool Schema::equals(const Schema &s) const { index_t dt_id = m_dtype.id(); index_t s_dt_id = s.dtype().id(); if(dt_id != s_dt_id) return false; bool res = true; if(dt_id == DataType::OBJECT_ID) { // all entries must be equal std::map<std::string, index_t>::const_iterator itr; for(itr = s.object_map().begin(); itr != s.object_map().end() && res; itr++) { if(has_path(itr->first)) { index_t s_idx = itr->second; res = s.children()[s_idx]->equals(fetch_child(itr->first)); } else { res = false; } } for(itr = object_map().begin(); itr != object_map().end() && res; itr++) { if(s.has_path(itr->first)) { index_t idx = itr->second; res = children()[idx]->equals(s.fetch_child(itr->first)); } else { res = false; } } } else if(dt_id == DataType::LIST_ID) { // all entries must be equal index_t s_n_chd = s.number_of_children(); // can't be compatible in this case if(number_of_children() != s_n_chd) return false; const std::vector<Schema*> &s_lst = s.children(); const std::vector<Schema*> &lst = children(); for(index_t i = 0; i < s_n_chd && res; i++) { res = lst[i]->equals(*s_lst[i]); } } else { res = m_dtype.equals(s.dtype()); } return res; }
/** Check for a path to an adjacent cell; see maze.h. */
bool has_wall(maze_t* m, cell_t* c, unsigned char d)
{
    /* A wall exists exactly where no path does. */
    if (has_path(m, c, d))
        return false;
    return true;
}
/*
 * Reads test cases "N M G" (players, games per pair, results so far) until
 * the all-zero terminator.  For each case it decides -- via a max-flow
 * feasibility construction solved by the global has_path() -- whether
 * player 0 can still finish strictly ahead of everyone else if it wins all
 * of its remaining games, printing "Y" or "N".
 *
 * Globals (declared elsewhere): N, M, G, points[], played[][], graph[]/gr[],
 * flow[][], cr[], T, curFlow, INF.
 *
 * Network shape as built below: source 0 -> one node per unfinished pair
 * (capacity = 2 * remaining games, i.e. total points still at stake for
 * that pair) -> the two players of the pair -> sink T with capacity
 * points[0]_max - points[i] - 1 (the most player i may still score without
 * catching player 0).  Feasible iff max flow saturates the source edges.
 */
int main(){
    int i, j, a, b, v, f, MAX;
    char c;
    while( 1 ){
hell:   /* restart point: jump here as soon as a case is decided "N" early */
        scanf("%d %d %d", &N, &M, &G );
        if( N+M+G == 0 )
            return 0;                       /* terminator case */
        /* reset points and the upper-triangular played counts */
        for( a = 0; a < N; a++ ){
            points[a] = 0;
            for( b = a; b < N; b++ )
                played[a][b] = 0;
        }
        /* tally the G reported results: '=' draw, '<' right side won,
           anything else means the left side won */
        for( i = 0; i < G; i++ ){
            scanf("%d %c %d", &a, &c, &b );
            played[a][b]++;
            played[b][a]++;
            if( c == '=' ) points[a]++, points[b]++;
            else if( c == '<' ) points[b] += 2;
        }
        /* count pairs (excluding player 0) with games left to play */
        v = 0;
        for( i = 1; i < N; i++ )
            for( j = i+1; j < N; j++ )
                if( played[i][j] != M )
                    v++;
        T = N+v;                            /* sink index after pair nodes */
        for( i = 0; i <= T; i++ )
            gr[i] = 0, cr[i] = 1;           /* clear adjacency + visited flags */
        /* build source -> pair-node -> two-players edges; MAX accumulates
           the total capacity leaving the source */
        v = 0;
        MAX = 0;
        for( i = 1; i < N; i++ )
            for( j = i+1; j < N; j++ )
                if( played[i][j] != M ){
                    a = N+v;
                    graph[0][gr[0]++] = a;
                    graph[a][gr[a]++] = 0;
                    flow[0][a] = 2*(M-played[i][j]);
                    flow[a][0] = 0;
                    MAX += flow[0][a];
                    graph[a][gr[a]++] = i;
                    graph[a][gr[a]++] = j;
                    graph[i][gr[i]++] = a;
                    graph[j][gr[j]++] = a;
                    flow[a][i] = flow[0][a];
                    flow[a][j] = flow[0][a];
                    flow[i][a] = 0;
                    flow[j][a] = 0;
                    v++;
                }
        /* assume player 0 wins all of its remaining games */
        for( i = 1; i < N; i++ )
            points[0] += 2*( M - played[0][i] );
        /* player i -> sink, capacity = points it may still take without
           reaching player 0; negative capacity means "N" immediately */
        for( i = 1; i < N; i++ ){
            graph[i][gr[i]++] = T;
            graph[T][gr[T]++] = i;
            a = points[0] - points[i] - 1;
            if( a < 0 ){
                puts("N");
                goto hell;
            }
            flow[i][T] = a;
            flow[T][i] = 0;
        }
        /* run augmenting paths until none remain; f is the max flow */
        f = 0;
        curFlow = INF;
        while( has_path( 0 ) ){
            f += curFlow;
            curFlow = INF;
        }
        /* feasible iff every remaining game's points could be absorbed */
        if( f == MAX ) puts("Y");
        else puts("N");
    }
    return 0;
}
/*
  Open a MERGE table: parse the .MRG meta file, open every child MyISAM
  table listed in it, and build the MYRG_INFO descriptor.

  The meta file is scanned twice: a first pass counts the children so the
  descriptor can be allocated in one block, a second pass opens them.
  Lines starting with '#' are comments; "#INSERT_METHOD=..." selects the
  insert method.  Relative child names are resolved against the parent's
  directory.

  Returns the new MYRG_INFO, or NULL with my_errno set on failure.  The
  errpos/switch ladder at `err:` unwinds exactly what was set up so far.
*/
MYRG_INFO *myrg_open(const char *name, int mode, int handle_locking)
{
  int save_errno,errpos=0;
  uint files= 0, i, dir_length, length, UNINIT_VAR(key_parts), min_keys= 0;
  ulonglong file_offset=0;
  char name_buff[FN_REFLEN*2],buff[FN_REFLEN],*end;
  MYRG_INFO *m_info=0;
  File fd;
  IO_CACHE file;
  MI_INFO *isam=0;
  uint found_merge_insert_method= 0;
  size_t name_buff_length;
  my_bool bad_children= FALSE;
  DBUG_ENTER("myrg_open");

  memset(&file, 0, sizeof(file));
  if ((fd= mysql_file_open(rg_key_file_MRG,
                           fn_format(name_buff, name, "", MYRG_NAME_EXT,
                                     MY_UNPACK_FILENAME|MY_APPEND_EXT),
                           O_RDONLY | O_SHARE, MYF(0))) < 0)
    goto err;
  errpos=1;
  if (init_io_cache(&file, fd, 4*IO_SIZE, READ_CACHE, 0, 0,
                    MYF(MY_WME | MY_NABP)))
    goto err;
  errpos=2;
  dir_length=dirname_part(name_buff, name, &name_buff_length);
  /* First pass: count non-empty, non-comment lines (= child tables). */
  while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
  {
    if ((end=buff+length)[-1] == '\n')
      end[-1]='\0';
    if (buff[0] && buff[0] != '#')
      files++;
  }

  my_b_seek(&file, 0);
  /* Second pass: open each child and accumulate table statistics. */
  while ((length=my_b_gets(&file,buff,FN_REFLEN-1)))
  {
    if ((end=buff+length)[-1] == '\n')
      *--end='\0';
    if (!buff[0])
      continue;                /* Skip empty lines */

    if (buff[0] == '#')
    {
      if (!strncmp(buff+1,"INSERT_METHOD=",14))
      {                        /* Lookup insert method */
        int tmp= find_type(buff + 15, &merge_insert_method, FIND_TYPE_BASIC);
        found_merge_insert_method = (uint) (tmp >= 0 ? tmp : 0);
      }
      continue;                /* Skip comments */
    }

    /* Relative child names are resolved against the parent's directory. */
    if (!has_path(buff))
    {
      (void) strmake(name_buff+dir_length,buff,
                     sizeof(name_buff)-1-dir_length);
      (void) cleanup_dirname(buff,name_buff);
    }
    else
      fn_format(buff, buff, "", "", 0);
    if (!(isam=mi_open(buff,mode,(handle_locking?HA_OPEN_WAIT_IF_LOCKED:0))))
    {
      /* In repair mode, report the broken child and keep going. */
      if (handle_locking & HA_OPEN_FOR_REPAIR)
      {
        myrg_print_wrong_table(buff);
        bad_children= TRUE;
        continue;
      }
      goto bad_children;
    }
    if (!m_info)                                /* First file */
    {
      /* Allocate the descriptor, child table array and key statistics in
         one block, sized with the count from the first pass. */
      key_parts=isam->s->base.key_parts;
      if (!(m_info= (MYRG_INFO*)
            my_malloc(sizeof(MYRG_INFO) +
                      files*sizeof(MYRG_TABLE) +
                      key_parts*sizeof(long),
                      MYF(MY_WME|MY_ZEROFILL))))
        goto err;
      DBUG_ASSERT(files);
      m_info->open_tables=(MYRG_TABLE *) (m_info+1);
      m_info->rec_per_key_part=(ulong *) (m_info->open_tables+files);
      m_info->tables= files;
      files= 0;                /* reused below as the "opened so far" index */
      m_info->reclength=isam->s->base.reclength;
      min_keys= isam->s->base.keys;
      errpos=3;
    }
    m_info->open_tables[files].table= isam;
    m_info->open_tables[files].file_offset=(my_off_t) file_offset;
    file_offset+=isam->state->data_file_length;
    files++;
    /* All children must share the parent's record length. */
    if (m_info->reclength != isam->s->base.reclength)
    {
      if (handle_locking & HA_OPEN_FOR_REPAIR)
      {
        myrg_print_wrong_table(buff);
        bad_children= TRUE;
        continue;
      }
      goto bad_children;
    }
    m_info->options|= isam->s->options;
    m_info->records+= isam->state->records;
    m_info->del+= isam->state->del;
    m_info->data_file_length+= isam->state->data_file_length;
    if (min_keys > isam->s->base.keys)
      min_keys= isam->s->base.keys;
    for (i=0; i < key_parts; i++)
      m_info->rec_per_key_part[i]+= (isam->s->state.rec_per_key_part[i] /
                                     m_info->tables);
  }

  if (bad_children)
    goto bad_children;
  /* Empty child list: still return a (zero-filled) descriptor. */
  if (!m_info && !(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO),
                                                  MYF(MY_WME | MY_ZEROFILL))))
    goto err;
  /* Don't mark table readonly, for ALTER TABLE ... UNION=(...) to work */
  m_info->options&= ~(HA_OPTION_COMPRESS_RECORD | HA_OPTION_READ_ONLY_DATA);
  m_info->merge_insert_method= found_merge_insert_method;

  /* 32-bit offsets cannot address the combined data files. */
  if (sizeof(my_off_t) == 4 && file_offset > (ulonglong) (ulong) ~0L)
  {
    my_errno=HA_ERR_RECORD_FILE_FULL;
    goto err;
  }
  m_info->keys= min_keys;
  memset(&m_info->by_key, 0, sizeof(m_info->by_key));

  /* this works ok if the table list is empty */
  m_info->end_table=m_info->open_tables+files;
  m_info->last_used_table=m_info->open_tables;
  m_info->children_attached= TRUE;

  (void) mysql_file_close(fd, MYF(0));
  end_io_cache(&file);
  mysql_mutex_init(rg_key_mutex_MYRG_INFO_mutex,
                   &m_info->mutex, MY_MUTEX_INIT_FAST);
  /* Register the new descriptor on the global open-table list. */
  m_info->open_list.data=(void*) m_info;
  mysql_mutex_lock(&THR_LOCK_open);
  myrg_open_list=list_add(myrg_open_list,&m_info->open_list);
  mysql_mutex_unlock(&THR_LOCK_open);
  DBUG_RETURN(m_info);

bad_children:
  my_errno= HA_ERR_WRONG_MRG_TABLE_DEF;
err:
  /* Unwind in reverse order of setup; errpos says how far we got. */
  save_errno=my_errno;
  switch (errpos) {
  case 3:
    while (files)
      (void) mi_close(m_info->open_tables[--files].table);
    my_free(m_info);
    /* Fall through */
  case 2:
    end_io_cache(&file);
    /* Fall through */
  case 1:
    (void) mysql_file_close(fd, MYF(0));
  }
  my_errno=save_errno;
  DBUG_RETURN (NULL);
}
/*
 * gplib entry point: archive (library) manager for PIC object files.
 *
 * Parses the command-line options into a mode (create / delete / replace /
 * extract / list / symbols), reads the existing archive unless creating a
 * new one, performs the selected operation, rebuilds the symbol index, and
 * writes the archive back when it was modified.
 *
 * Uses file-level globals declared elsewhere: state, definition_tbl,
 * symbol_index, gp_quiet, gp_num_errors.
 */
int main(int argc, char *argv[])
{
  extern int optind;
  int i = 0;
  int c;
  gp_boolean usage = false;
  gp_boolean update_archive = false;
  gp_boolean no_index = false;
  gp_archive_type *object = NULL;

  gp_init();

  /* symbols are case sensitive */
  definition_tbl = push_symbol_table(NULL, false);
  symbol_index = push_symbol_table(NULL, false);

  /* option loop: each mode letter routes through select_mode() */
  while ((c = GETOPT_FUNC) != EOF) {
    switch (c) {
    case '?':
    case 'h':
      usage = true;
      break;
    case 'c':
      select_mode(ar_create);
      break;
    case 'd':
      select_mode(ar_delete);
      break;
    case 'n':
      no_index = true;
      break;
    case 'q':
      gp_quiet = true;
      break;
    case 'r':
      select_mode(ar_replace);
      break;
    case 's':
      select_mode(ar_symbols);
      break;
    case 't':
      select_mode(ar_list);
      break;
    case 'v':
      fprintf(stderr, "%s\n", GPLIB_VERSION_STRING);
      exit(0);
      break;
    case 'x':
      select_mode(ar_extract);
      break;
    }
    if (usage)
      break;
  }

  if (optind < argc) {
    /* fetch the library name */
    state.filename = argv[optind++];
    /* some operations require object filenames or membernames */
    for ( ; optind < argc; optind++) {
      state.objectname[state.numobjects] = argv[optind];
      if (state.numobjects >= MAX_OBJ_NAMES) {
        gp_error("exceeded maximum number of object files");
        break;
      }
      state.numobjects++;
    }
  } else {
    usage = true;
  }

  /* User did not select an operation */
  if (state.mode == ar_null) {
    usage = true;
  }

  /* User did not provide object names */
  if ((state.mode != ar_list) && (state.mode != ar_symbols) &&
      (state.numobjects == 0)) {
    usage = true;
  }

  /* NOTE(review): execution continues past this call, so show_usage()
     presumably exits the process -- confirm in its definition. */
  if (usage) {
    show_usage();
  }

  /* if we are not creating a new archive, we have to read an existing one */
  if (state.mode != ar_create) {
    if (gp_identify_coff_file(state.filename) != archive_file) {
      gp_error("\"%s\" is not a valid archive file", state.filename);
      exit(1);
    } else {
      state.archive = gp_archive_read(state.filename);
    }
  }

  /* process the option */
  i = 0;
  switch (state.mode) {
  case ar_create:
  case ar_replace:
    /* add/replace each named object file as an archive member */
    while (i < state.numobjects) {
      if (gp_identify_coff_file(state.objectname[i]) != object_file) {
        gp_error("\"%s\" is not a valid object file", state.objectname[i]);
        break;
      } else {
        state.archive = gp_archive_add_member(state.archive,
                                              state.objectname[i],
                                              object_name(state.objectname[i]));
      }
      i++;
    }
    update_archive = true;
    break;
  case ar_delete:
    /* member names must be bare (no directory component) */
    while (i < state.numobjects) {
      if (has_path(state.objectname[i])) {
        gp_error("invalid object name \"%s\"", state.objectname[i]);
        break;
      }
      object = gp_archive_find_member(state.archive, state.objectname[i]);
      if (object == NULL) {
        gp_error("object \"%s\" not found", state.objectname[i]);
        break;
      } else {
        state.archive = gp_archive_delete_member(state.archive,
                                                 state.objectname[i]);
      }
      i++;
    }
    update_archive = true;
    break;
  case ar_extract:
    /* write each named member back out as a standalone file */
    while (i < state.numobjects) {
      if (has_path(state.objectname[i])) {
        gp_error("invalid object name \"%s\"", state.objectname[i]);
        break;
      }
      object = gp_archive_find_member(state.archive, state.objectname[i]);
      if (object == NULL) {
        gp_error("object \"%s\" not found", state.objectname[i]);
        break;
      } else {
        if (gp_archive_extract_member(state.archive, state.objectname[i])) {
          gp_error("can't write file \"%s\"", state.objectname[i]);
          break;
        }
      }
      i++;
    }
    break;
  case ar_list:
    gp_archive_list_members(state.archive);
    break;
  case ar_symbols:
    if (gp_archive_have_index(state.archive) == 0) {
      gp_error("this archive has no symbol index");
    } else {
      gp_archive_read_index(symbol_index, state.archive);
      gp_archive_print_table(symbol_index);
    }
    break;
  case ar_null:
  default:
    assert(0);
  }

  /* If the archive is being modified remove the old symbol index */
  if (update_archive) {
    state.archive = gp_archive_remove_index(state.archive);
  }

  /* check for duplicate symbols */
  gp_archive_make_index(state.archive, definition_tbl);

  /* add the symbol index to the archive */
  if (update_archive && (!no_index)) {
    state.archive = gp_archive_add_index(definition_tbl, state.archive);
  }

  /* write the new or modified archive */
  if (update_archive && (gp_num_errors == 0)) {
    if (gp_archive_write(state.archive, state.filename))
      gp_error("can't write the new archive file");
  }

  if (gp_num_errors > 0)
    return EXIT_FAILURE;
  else
    return EXIT_SUCCESS;
}
/*
  Open a MERGE parent table without opening the children.

  Parses the .MRG meta file in two passes: the first counts children and
  picks up the INSERT_METHOD, the second resolves each child name (relative
  names against the parent's directory) and hands it to `callback`, which
  registers the child with the handler table.  Child tables themselves are
  attached later.

  Returns the new MYRG_INFO, or NULL with my_errno preserved on failure;
  the errpos ladder at `err:` unwinds whatever was set up.
*/
MYRG_INFO *myrg_parent_open(const char *parent_name,
                            int (*callback)(void*, const char*),
                            void *callback_param)
{
  MYRG_INFO *m_info;
  int rc;
  int errpos;
  int save_errno;
  int insert_method;
  uint length;
  uint dir_length;
  uint child_count;
  size_t name_buff_length;
  File fd;
  IO_CACHE file_cache;
  char parent_name_buff[FN_REFLEN * 2];
  char child_name_buff[FN_REFLEN];
  DBUG_ENTER("myrg_parent_open");

  rc= 1;
  errpos= 0;
  bzero((char*) &file_cache, sizeof(file_cache));

  /* Open MERGE meta file. */
  if ((fd= my_open(fn_format(parent_name_buff, parent_name, "", MYRG_NAME_EXT,
                             MY_UNPACK_FILENAME|MY_APPEND_EXT),
                   O_RDONLY | O_SHARE, MYF(0))) < 0)
    goto err; /* purecov: inspected */
  errpos= 1;

  if (init_io_cache(&file_cache, fd, 4 * IO_SIZE, READ_CACHE,
                    0, 0, MYF(MY_WME | MY_NABP)))
    goto err; /* purecov: inspected */
  errpos= 2;

  /* Count children. Determine insert method. */
  child_count= 0;
  insert_method= 0;
  while ((length= my_b_gets(&file_cache, child_name_buff, FN_REFLEN - 1)))
  {
    /* Remove line terminator. */
    if (child_name_buff[length - 1] == '\n')
      child_name_buff[--length]= '\0';

    /* Skip empty lines. */
    if (!child_name_buff[0])
      continue; /* purecov: inspected */

    /* Skip comments, but evaluate insert method. */
    if (child_name_buff[0] == '#')
    {
      if (!strncmp(child_name_buff + 1, "INSERT_METHOD=", 14))
      {
        /* Compare buffer with global methods list: merge_insert_method. */
        insert_method= find_type(child_name_buff + 15,
                                 &merge_insert_method, 2);
      }
      continue;
    }

    /* Count the child. */
    child_count++;
  }

  /* Allocate MERGE parent table structure. */
  /* Descriptor and child table array are allocated as one block. */
  if (!(m_info= (MYRG_INFO*) my_malloc(sizeof(MYRG_INFO) +
                                       child_count * sizeof(MYRG_TABLE),
                                       MYF(MY_WME | MY_ZEROFILL))))
    goto err; /* purecov: inspected */
  errpos= 3;
  m_info->open_tables= (MYRG_TABLE*) (m_info + 1);
  m_info->tables= child_count;
  m_info->merge_insert_method= insert_method > 0 ? insert_method : 0;

  /* This works even if the table list is empty. */
  m_info->end_table= m_info->open_tables + child_count;

  if (!child_count)
  {
    /* Do not attach/detach an empty child list. */
    m_info->children_attached= TRUE;
  }

  /* Call callback for each child. */
  dir_length= dirname_part(parent_name_buff, parent_name, &name_buff_length);
  my_b_seek(&file_cache, 0);
  while ((length= my_b_gets(&file_cache, child_name_buff, FN_REFLEN - 1)))
  {
    /* Remove line terminator. */
    if (child_name_buff[length - 1] == '\n')
      child_name_buff[--length]= '\0';

    /* Skip empty lines and comments. */
    if (!child_name_buff[0] || (child_name_buff[0] == '#'))
      continue;

    /* Resolve relative child names against the parent's directory. */
    if (!has_path(child_name_buff))
    {
      VOID(strmake(parent_name_buff + dir_length, child_name_buff,
                   sizeof(parent_name_buff) - 1 - dir_length));
      VOID(cleanup_dirname(child_name_buff, parent_name_buff));
    }
    else
      fn_format(child_name_buff, child_name_buff, "", "", 0);
    DBUG_PRINT("info", ("child: '%s'", child_name_buff));

    /* Callback registers child with handler table. */
    if ((rc= (*callback)(callback_param, child_name_buff)))
      goto err; /* purecov: inspected */
  }

  end_io_cache(&file_cache);
  VOID(my_close(fd, MYF(0)));
  VOID(pthread_mutex_init(&m_info->mutex, MY_MUTEX_INIT_FAST));

  /* Register the new descriptor on the global open-table list. */
  m_info->open_list.data= (void*) m_info;
  pthread_mutex_lock(&THR_LOCK_open);
  myrg_open_list= list_add(myrg_open_list, &m_info->open_list);
  pthread_mutex_unlock(&THR_LOCK_open);

  DBUG_RETURN(m_info);

  /* purecov: begin inspected */
err:
  /* Unwind in reverse order of setup; errpos says how far we got. */
  save_errno= my_errno;
  switch (errpos) {
  case 3:
    my_free((char*) m_info, MYF(0));
    /* Fall through */
  case 2:
    end_io_cache(&file_cache);
    /* Fall through */
  case 1:
    VOID(my_close(fd, MYF(0)));
  }
  my_errno= save_errno;
  DBUG_RETURN (NULL);
  /* purecov: end */
}