void protect_text(const char *fileName, const VersatileEncodingConfig* vec){ U_FILE *file_reader = u_fopen(vec, fileName, U_READ); if(file_reader == NULL){ fatal_error("u_fopen"); } unichar *text = read_file(file_reader); unichar *protected_text = protect_lexical_tag(text, false); free(text); u_fclose(file_reader); U_FILE *file_write = u_fopen(vec, fileName, U_WRITE); if(file_write == NULL){ fatal_error("u_fopen"); } int written = u_fwrite(protected_text, u_strlen(protected_text),file_write); if(written != (int)u_strlen(protected_text)){ fatal_error("u_fwrite"); } u_fclose(file_write); free(protected_text); }
/** * This function takes a unicode string representing a regular expression and * compiles it into a .grf file. It returns 1 in case of success; 0 otherwise. */ int reg2grf(const unichar* regexp,const char* name_grf, const VersatileEncodingConfig* vec) { if (regexp[0]=='\0') { error("You must specify a non empty regular expression\n"); return 0; } U_FILE* out=u_fopen(vec,name_grf,U_WRITE); if (out==NULL) { error("Cannot open the output file for the regular expression\n"); return 0; } struct reg2grf_info* INFO=new_reg2grf_info(); /* We create the initial and final states that must have numbers 0 and 1 */ add_state(INFO,u_strdup("<E>")); add_state(INFO,u_strdup("")); /* We print the grf header */ u_fprintf(out,"#Unigraph\n"); u_fprintf(out,"SIZE 1313 950\n"); u_fprintf(out,"FONT Times New Roman: 12\n"); u_fprintf(out,"OFONT Times New Roman:B 12\n"); u_fprintf(out,"BCOLOR 16777215\n"); u_fprintf(out,"FCOLOR 0\n"); u_fprintf(out,"ACOLOR 12632256\n"); u_fprintf(out,"SCOLOR 16711680\n"); u_fprintf(out,"CCOLOR 255\n"); u_fprintf(out,"DBOXES y\n"); u_fprintf(out,"DFRAME y\n"); u_fprintf(out,"DDATE y\n"); u_fprintf(out,"DFILE y\n"); u_fprintf(out,"DDIR y\n"); u_fprintf(out,"DRIG n\n"); u_fprintf(out,"DRST n\n"); u_fprintf(out,"FITS 100\n"); u_fprintf(out,"PORIENT L\n"); u_fprintf(out,"#\n"); int input_state; int output_state; int result=reg_2_grf(regexp,&input_state,&output_state,INFO); if (result!=1) { u_fclose(out); af_remove(name_grf); free_reg2grf_info(INFO); if (result==0) { error("Syntax error in regular expression\n"); } return 0; } /* If the compilation has successed, we must link the resulting automaton piece * to the grf's initial and final states */ add_transition(0,input_state,INFO); add_transition(output_state,1,INFO); save_states(out,INFO); free_reg2grf_info(INFO); u_fclose(out); return 1; }
/**
 * Rewrites the text file 'text', protecting the special characters found in
 * every braced sequence {...}. The protected content is first written to a
 * file named "temp" in the same directory, which is then copied back over
 * the original file.
 */
void protect_special_characters(const char *text,Encoding encoding_output,
                                int bom_output,
                                int mask_encoding_compatibility_input) {
char path[FILENAME_MAX];
char temp_name_file[FILENAME_MAX];
get_path(text,path);
sprintf(temp_name_file,"%stemp",path);
U_FILE *source = u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input,
                                                     text,U_READ);
if (source == NULL) {
   perror("u_fopen\n");
   fprintf(stderr,"Cannot open file %s\n",text);
   exit(1);
}
U_FILE *destination = u_fopen_versatile_encoding(encoding_output,bom_output,
                                                 mask_encoding_compatibility_input,
                                                 temp_name_file,U_WRITE);
if (destination == NULL) {
   perror("u_fopen\n");
   fprintf(stderr,"Cannot open file %s\n",temp_name_file);
   exit(1);
}
int c;
while ((c = u_fgetc(source)) != EOF) {
   u_fputc((unichar)c,destination);
   if (c == '{') {
      /* A '{' starts a braced sequence whose content must be protected */
      unichar *raw = get_braced_string(source);
      unichar *protected_string = protect_braced_string(raw);
      u_fprintf(destination,"%S",protected_string);
      free(raw);
      free(protected_string);
   }
}
u_fclose(source);
u_fclose(destination);
copy_file(text,temp_name_file);
/* NOTE(review): the 'temp' file is not removed here — confirm whether a
 * caller deletes it */
}
/**
 * Loads an alphabet file and returns the associated 'Alphabet*' structure.
 * If 'korean' is non null, we compute the equivalences between Chinese and Hangul
 * characters.
 *
 * File format (one entry per line):
 *   "#AZ"  declares the whole interval [A..Z], each char being both an
 *          upper and a lower case letter equivalent to itself;
 *   "Aa"   declares 'A' as the upper case form of 'a';
 *   "a"    declares a caseless letter (no min/maj distinction, like in Thai).
 * Returns NULL if the file cannot be opened or if an interval is invalid.
 */
Alphabet* load_alphabet(const VersatileEncodingConfig* vec,const char* filename,int korean) {
/* If this alphabet was already loaded and made persistent, reuse it */
void* a=get_persistent_structure(filename);
if (a!=NULL) {
   return (Alphabet*)a;
}
U_FILE* f;
f=u_fopen(vec,filename,U_READ);
if (f==NULL) {
   return NULL;
}
Alphabet* alphabet=new_alphabet(korean);
int c;
unichar lower,upper;
while ((c=u_fgetc(f))!=EOF) {
   upper=(unichar)c;
   if (upper=='\n') {
      /* We skip empty lines */
      continue;
   }
   if (upper=='#') {
      /* we are in the case of an interval #AZ -> [A..Z] */
      lower=(unichar)u_fgetc(f);
      upper=(unichar)u_fgetc(f);
      if (lower>upper) {
         error("Error in alphabet file: for an interval like #AZ, A must be before Z\n");
         free_alphabet(alphabet);
         u_fclose(f);
         return NULL;
      }
      for (c=lower;c<=upper;c++) {
         /* flag 1|2: the char is valid both as upper (1) and lower (2) case */
         SET_CASE_FLAG_MACRO(c,alphabet,1|2);
         add_letter_equivalence(alphabet,(unichar)c,(unichar)c);
      }
      u_fgetc(f); /* reading the \n */
   } else {
      /* First char of the line is an upper case letter */
      SET_CASE_FLAG_MACRO(upper,alphabet,1);
      lower=(unichar)u_fgetc(f);
      if (lower!='\n') {
         /* Second char is its lower case equivalent */
         SET_CASE_FLAG_MACRO(lower,alphabet,2);
         u_fgetc(f); /* reading the \n */
         add_letter_equivalence(alphabet,lower,upper);
      } else {
         /* we are in the case of a single (no min/maj distinction like in thai) */
         SET_CASE_FLAG_MACRO(upper,alphabet,2);
         add_letter_equivalence(alphabet,upper,upper);
      }
   }
}
u_fclose(f);
return alphabet;
}
/** * This function reads the given char order file. */ void read_char_order(const VersatileEncodingConfig* vec, const char* name, struct sort_infos* inf) { int c; int current_line = 1; U_FILE* f = u_fopen(vec, name, U_READ); if (f == NULL) { error("Cannot open file %s\n", name); return; } unichar current_canonical = '\0'; int current_priority = 0; while ((c = u_fgetc(f)) != EOF) { if (c != '\n') { /* we ignore the \n char */ if (inf->class_numbers[(unichar) c] != 0) { error("Error in %s: char 0x%x appears several times\n", name, c); } else { inf->class_numbers[(unichar) c] = current_line; if (current_canonical == '\0') { current_canonical = (unichar) c; } inf->canonical[(unichar) c] = current_canonical; inf->priority[(unichar) c] = ++current_priority; } } else { current_line++; current_canonical = '\0'; current_priority = 0; } } u_fclose(f); }
int main(int argc, char** argv) { if (argc < 2) { printf("This program requires 1 argument: The path of the PHP file to parse.\n"); return -1; } SampleObserver observer; pelet::ParserClass parser; parser.SetVersion(pelet::PHP_54); parser.SetClassObserver(&observer); parser.SetClassMemberObserver(&observer); parser.SetFunctionObserver(&observer); parser.SetVariableObserver(&observer); pelet::LintResultsClass results; bool parsed = parser.ScanFile(argv[1], results); if (parsed) { printf("Parsing complete.\n"); } else { UFILE* ufout = u_finit(stdout, NULL, NULL); u_fprintf(ufout, "Parse error: %S on line %d\n", results.Error.getTerminatedBuffer(), results.LineNumber); u_fclose(ufout); } // this is only used so that this program can be run through valgrind (memory leak // detector). In real-world usage, this include is not needed. u_cleanup(); return 0; }
cassys_tokens_list *cassys_load_text(const char *tokens_text_name, const char *text_cod_name, struct text_tokens **tokens){ int mask_encoding_compatibility_input = DEFAULT_MASK_ENCODING_COMPATIBILITY_INPUT; *tokens = load_text_tokens(tokens_text_name,mask_encoding_compatibility_input); U_FILE *f = u_fopen(BINARY, text_cod_name,U_READ); if( f == NULL){ perror("fopen\n"); fprintf(stderr,"Cannot open file %s\n",text_cod_name); exit(1); } cassys_tokens_list *list = NULL; cassys_tokens_list *temp = list; int token_id; int char_read = (int)fread(&token_id,sizeof(int),1,f); while(char_read ==1){ if(list==NULL){ list = new_element((*tokens)->token[token_id],0); temp = list; } else { temp ->next_token = new_element((*tokens)->token[token_id],0); temp = temp -> next_token; } char_read = (int)fread(&token_id,sizeof(int),1,f); } u_fclose(f); return list; }
/**
 * Loads the initial keyword list from a tok_by_freq.txt file,
 * and turns all those tokens in a list whose primary key is the
 * lower case token:
 * The/20 THE/2 the/50 => the->(The/20 THE/2 the/50)
 *
 * Returns NULL if the file cannot be opened; aborts on a malformed file.
 */
struct string_hash_ptr* load_tokens_by_freq(char* name,VersatileEncodingConfig* vec) {
U_FILE* f=u_fopen(vec,name,U_READ);
if (f==NULL) return NULL;
Ustring* line=new_Ustring(128);
Ustring* lower=new_Ustring(128);
struct string_hash_ptr* res=new_string_hash_ptr(1024);
/* The first line only contains the total number of tokens: skip it */
if (EOF==readline(line,f)) {
   fatal_error("Invalid empty file %s\n",name);
}
int freq,pos;
while (EOF!=readline(line,f)) {
   /* Each line is of the form "<freq> <token>" */
   if (1!=u_sscanf(line->str,"%d%n",&freq,&pos)) {
      fatal_error("Invalid line in file %s:\n%S\n",name,line->str);
   }
   u_strcpy(lower,line->str+pos);
   u_tolower(lower->str);
   int index=get_value_index(lower->str,res,INSERT_IF_NEEDED,NULL);
   if (index==-1) {
      fatal_error("Internal error in load_tokens_by_freq\n");
   }
   /* Prepend this case variant to the list stored under the lower case key */
   KeyWord* previous=(KeyWord*)res->value[index];
   res->value[index]=new_KeyWord(freq,line->str+pos,previous);
}
free_Ustring(line);
free_Ustring(lower);
u_fclose(f);
return res;
}
/** * \brief Reads a 'concord.ind' file and returns a fifo list of all matches found and their replacement * * \param[in] concord_file_name the name of the concord.ind file * * \return a fifo list of all the matches found with their replacement sentences. Each element is * stored in a locate_pos structure */ struct fifo *read_concord_file(const char *concord_file_name,int mask_encoding_compatibility_input){ unichar line[4096]; struct fifo *f = new_fifo(); U_FILE *concord_desc_file; concord_desc_file = u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input, concord_file_name,U_READ); if( concord_desc_file == NULL){ perror("u_fopen\n"); fprintf(stderr,"Cannot open file %s\n",concord_file_name); exit(1); } if(u_fgets(line,4096,concord_desc_file)==EOF){ fatal_error("Malformed concordance file %s",concord_file_name); } while(u_fgets(line,4096,concord_desc_file)!=EOF){ // we don't want the end of line char line[u_strlen(line)-1]='\0'; locate_pos *l = read_concord_line(line); put_ptr(f,l); } u_fclose(concord_desc_file); return f; }
/**
 * Lua __gc metamethod for a UFILE userdata: closes the underlying ICU
 * stream if it is still open. Pushes no values onto the Lua stack.
 */
static int icu_ufile__gc(lua_State *L) {
    UFILE* stream = icu4lua_trustufile(L,1);
    if (stream != NULL) {
        u_fclose(stream);
    }
    return 0;
}
/**
 * Loads a compound word file, adding each word to the keywords.
 * Lines starting with '{' (tags) are skipped. The primary key is the lower
 * case form of the word, so case variants share one entry. Does nothing if
 * the file cannot be opened.
 */
void load_compound_words(char* name,VersatileEncodingConfig* vec,
                         struct string_hash_ptr* keywords) {
U_FILE* f=u_fopen(vec,name,U_READ);
if (f==NULL) return;
Ustring* line=new_Ustring(256);
Ustring* lower=new_Ustring(256);
while (EOF!=readline(line,f)) {
   if (line->str[0]=='{') {
      /* We skip tags */
      continue;
   }
   u_strcpy(lower,line->str);
   u_tolower(lower->str);
   int index=get_value_index(lower->str,keywords,INSERT_IF_NEEDED,NULL);
   if (index==-1) {
      /* Fixed: the message used to wrongly refer to load_tokens_by_freq,
       * which made failures here impossible to locate */
      fatal_error("Internal error in load_compound_words\n");
   }
   KeyWord* value=(KeyWord*)keywords->value[index];
   add_keyword(&value,line->str,1);
   keywords->value[index]=value;
}
free_Ustring(line);
free_Ustring(lower);
u_fclose(f);
}
/**
 * Lua binding: closes an open UFILE userdata, nulls the stored pointer so
 * later accesses see a closed stream, and returns true to Lua.
 */
static int icu_ufile_close(lua_State *L) {
    UFILE* stream = icu4lua_checkopenufile(L,1,UFILE_UV_META);
    u_fclose(stream);
    /* Mark the userdata as closed */
    *(UFILE**)lua_touserdata(L,1) = NULL;
    lua_pushboolean(L,1);
    return 1;
}
/**
 * ICU library-cleanup callback: releases the cached stdout UFILE wrapper,
 * if one was created. Always reports success.
 */
static UBool U_CALLCONV uprintf_cleanup(void)
{
    if (gStdOut == NULL) {
        return TRUE;
    }
    u_fclose(gStdOut);
    gStdOut = NULL;
    return TRUE;
}
/** * This function takes two concordance index (in1 and in2) and * produces a HTML file (out) that shows the differences between * those two concordances. */ int diff(const VersatileEncodingConfig* vec,const char* in1,const char* in2,const char* out, const char* font,int size,int diff_only) { char concor1[FILENAME_MAX]; char concor2[FILENAME_MAX]; get_path(in1,concor1); strcat(concor1,"concord-1.txt"); get_path(in2,concor2); strcat(concor2,"concord-2.txt"); /* First, we build the two concordances */ create_text_concordances(vec,in1,in2,concor1,concor2); /* Then, we load the two index */ U_FILE* f1=u_fopen(vec,in1,U_READ); if (f1==NULL) return 0; struct match_list* l1=load_match_list(f1,NULL,NULL); u_fclose(f1); U_FILE* f2=u_fopen(vec,in2,U_READ); if (f2==NULL) { return 0; } struct match_list* l2=load_match_list(f2,NULL,NULL); u_fclose(f2); /* We open the output file in UTF8, because the GUI expects this file * to be that encoded */ U_FILE* output=u_fopen(UTF8,out,U_WRITE); if (output==NULL) { fatal_error("Cannot open output file %s\n",out); return 0; } /* We open the two concordance files */ f1=u_fopen(vec,concor1,U_READ); f2=u_fopen(vec,concor2,U_READ); /* And then we fill the output file with the differences * between the two concordances */ print_diff_HTML_header(output,font,size); compute_concordance_differences(l1,l2,f1,f2,output,diff_only); print_diff_HTML_end(output); free_match_list(l1); free_match_list(l2); u_fclose(f1); u_fclose(f2); u_fclose(output); /* We remove the tmp files */ //af_remove(concor1); //af_remove(concor2); return 1; }
/**
 * Returns the size in bytes of the given file, or -1 if not found.
 */
long get_file_size(const char* name) {
U_FILE* f=u_fopen(ASCII,name,U_READ);
if (f==NULL) {
   return -1;
}
/* Seek to the end and read back the position to get the byte count */
fseek(f,0,SEEK_END);
long n=ftell(f);
u_fclose(f);
return n;
}
/**
 * Closes the given file and frees the memory associated to the structure.
 * Does nothing when 'fstf' is NULL.
 */
void fst_file_close_in(Elag_fst_file_in* fstf) {
if (fstf==NULL) {
   return;
}
/* free(NULL) is a no-op, so no NULL guards are needed below */
free(fstf->name);
u_fclose(fstf->f);
free_string_hash_ptr(fstf->symbols,(void(*)(void*))free_symbols);
free(fstf->renumber);
free(fstf);
}
/** * This method gets called when a trait use statement has been found * * @param const UnicodeString& namespace the fully qualified namespace of the class that uses the trait * @param className the fully qualified name of the class that uses the trait * @param traitName the fully qualified name of the trait to be used */ virtual void TraitUseFound(const UnicodeString& namespaceName, const UnicodeString& className, const UnicodeString& traitName) { UFILE* ufout = u_finit(stdout, NULL, NULL); u_fprintf(ufout, "Trait Usage Found in class %.*S in namespace %.*S. Trait Name %.*S \n", className.length(), className.getBuffer(), namespaceName.length(), namespaceName.getBuffer(), traitName.length(), traitName.getBuffer()); u_fclose(ufout); }
/**
 * The main function of the Reg2Grf program: reads a regular expression from
 * the file given as last argument and compiles it into a "regexp.grf" file
 * created next to that input file.
 * Returns 0 on success, 1 if the conversion failed.
 */
int main_Reg2Grf(int argc,char* const argv[]) {
if (argc==1) {
   usage();
   return 0;
}
Encoding encoding_output = DEFAULT_ENCODING_OUTPUT;
int bom_output = DEFAULT_BOM_OUTPUT;
int mask_encoding_compatibility_input = DEFAULT_MASK_ENCODING_COMPATIBILITY_INPUT;
int val,index=-1;
struct OptVars* vars=new_OptVars();
/* Command line option parsing */
while (EOF!=(val=getopt_long_TS(argc,argv,optstring_Reg2Grf,lopts_Reg2Grf,&index,vars))) {
   switch(val) {
   case 'k': /* input encoding */
      if (vars->optarg[0]=='\0') {
         fatal_error("Empty input_encoding argument\n");
      }
      decode_reading_encoding_parameter(&mask_encoding_compatibility_input,vars->optarg);
      break;
   case 'q': /* output encoding */
      if (vars->optarg[0]=='\0') {
         fatal_error("Empty output_encoding argument\n");
      }
      decode_writing_encoding_parameter(&encoding_output,&bom_output,vars->optarg);
      break;
   case 'h': usage(); return 0;
   case ':': /* fatal_error never returns, so falling through is harmless */
      if (index==-1) fatal_error("Missing argument for option -%c\n",vars->optopt);
      else fatal_error("Missing argument for option --%s\n",lopts_Reg2Grf[index].name);
   case '?':
      if (index==-1) fatal_error("Invalid option -%c\n",vars->optopt);
      else fatal_error("Invalid option --%s\n",vars->optarg);
      break;
   }
   index=-1;
}
/* Exactly one non-option argument (the expression file) is expected */
if (vars->optind!=argc-1) {
   fatal_error("Invalid arguments: rerun with --help\n");
}
U_FILE* f=u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input,argv[vars->optind],U_READ);
if (f==NULL) {
   fatal_error("Cannot open file %s\n",argv[vars->optind]);
}
/* We read the regular expression in the file */
unichar exp[REG_EXP_MAX_LENGTH];
if ((REG_EXP_MAX_LENGTH-1)==u_fgets(exp,REG_EXP_MAX_LENGTH,f)) {
   fatal_error("Too long regular expression\n");
}
u_fclose(f);
/* The output .grf is written in the input file's directory */
char grf_name[FILENAME_MAX];
get_path(argv[vars->optind],grf_name);
strcat(grf_name,"regexp.grf");
if (!reg2grf(exp,grf_name,encoding_output,bom_output)) {
   return 1;
}
free_OptVars(vars);
u_printf("Expression converted.\n");
return 0;
}
/** * This method gets called when a define declaration is found. * * @param const UnicodeString& namespace the fully qualified namespace name that the constant that was found * @param const UnicodeString& variableName the name of the defined variable * @param const UnicodeString& variableValue the variable value * @param const UnicodeString& comment PHPDoc attached to the define * @param lineNumber the line number (1-based) that the define was found in */ virtual void DefineDeclarationFound(const UnicodeString& namespaceName, const UnicodeString& variableName, const UnicodeString& variableValue, const UnicodeString& comment, const int lineNumber) { UFILE* ufout = u_finit(stdout, NULL, NULL); u_fprintf(ufout, "Define Found: %.*S in namespace %.*S on line %d\n", variableName.length(), variableName.getBuffer(), namespaceName.length(), namespaceName.getBuffer(), lineNumber); u_fclose(ufout); }
/**
 * Prints the number of working caches to stdout through an ICU stream,
 * then prints the symbol table of each working cache.
 */
void t4p::TagCacheClass::Print() {
    UFILE* ufout = u_finit(stdout, NULL, NULL);
    // Fixed: size() returns size_t; passing it for %d is undefined behavior
    // on platforms where size_t is wider than int, so cast explicitly
    u_fprintf(ufout, "Number of working caches: %d\n", (int)WorkingCaches.size());
    u_fclose(ufout);
    std::map<wxString, t4p::WorkingCacheClass*>::const_iterator it = WorkingCaches.begin();
    for (; it != WorkingCaches.end(); ++it) {
        it->second->SymbolTable.Print();
    }
}
/** * This method gets called when a class is found. * * @param const UnicodeString& namespace the fully qualified "declared" namespace of the class that was found * @param const UnicodeString& className the name of the class that was found * @param const UnicodeString& signature the list of classes that the class inherits / implements in code format * for example "extends UserClass implements Runnable" * @param const UnicodeString& comment PHPDoc attached to the class * @param lineNumber the line number (1-based) that the class was found in */ virtual void ClassFound(const UnicodeString& namespaceName, const UnicodeString& className, const UnicodeString& signature, const UnicodeString& comment, const int lineNumber) { UFILE* ufout = u_finit(stdout, NULL, NULL); u_fprintf(ufout, "Class Found: %.*S in namespace %.*S on line %d \n", className.length(), className.getBuffer(), namespaceName.length(), namespaceName.getBuffer(), lineNumber); u_fclose(ufout); }
/**
 * Saves the labels of the given .fst2, closes the file
 * and frees the associated memory.
 */
void fst_file_close_out(Elag_fst_file_out* fstout) {
write_fst_tags(fstout);
/* We come back to the header position and print the number of automata
 * on 10 digits */
fseek(fstout->f,fstout->fstart,SEEK_SET);
u_fprintf(fstout->f,"%010d",fstout->nb_automata);
u_fclose(fstout->f);
free_string_hash(fstout->labels);
/* free(NULL) is a no-op, so no guard is needed */
free(fstout->name);
free(fstout);
}
int save_offsets(const VersatileEncodingConfig* vec, const char* filename, const vector_offset* offsets) { U_FILE* f_output_offsets = u_fopen(vec, filename, U_WRITE); if (f_output_offsets == NULL) { error("Cannot create offset file %s\n", filename); return 1; } save_offsets(f_output_offsets, offsets); u_fclose(f_output_offsets); return 0; }
/** * Loads snt offsets from the given binary file. */ vector_int* load_snt_offsets(const char* name) { U_FILE* f=u_fopen(BINARY,name,U_READ); if (f==NULL) return NULL; long size=get_file_size(f); if (size%(3*sizeof(int))!=0) { u_fclose(f); return NULL; } vector_int* v=new_vector_int((int)(size/sizeof(int))); if (size!=0) { int n=(int)fread(v->tab,sizeof(int),size/sizeof(int),f); u_fclose(f); if (n!=(int)(size/sizeof(int))) { free_vector_int(v); return NULL; } v->nbelems=v->size; } return v; }
/**
 * Attaches the given ICU file handle to this buffered reader, (re)initializes
 * the internal buffer and cursors, and reads the first chunk of the file.
 * If the whole file fits in the buffer, the handle is closed immediately and
 * a NUL terminator is placed at EOF for the lexers.
 * Returns true when 'ufile' is a valid (non-NULL) handle.
 */
bool pelet::UCharBufferedFileClass::OpenFile(UFILE* ufile, int startingCapacity) {
    // close any previously attached handle before taking the new one
    if (NULL != File) {
        u_fclose(File);
        File = NULL;
    }
    if (!Buffer) {
        // first use: allocate the backing buffer and reset all cursors
        LineNumber = 1;
        Buffer = new UChar[startingCapacity];
        BufferCapacity = startingCapacity;
        Current = Buffer;
        TokenStart = Buffer;
        Limit = Buffer;
        Marker = Buffer;
        HasReachedEof = false;
        Eof = NULL;
    }
    bool opened = false;
    File = ufile;
    if (NULL != File) {
        // point to the start of the file
        LineNumber = 1;
        CharacterPos = 0;
        Current = Buffer;
        Limit = Buffer;
        opened = true;
        int read = u_file_read(Buffer, BufferCapacity, File);
        Limit = Buffer + BufferCapacity - 1;
        if (read < BufferCapacity) {
            // the whole file fit in the buffer: we are done with the handle
            u_fclose(File);
            File = NULL;
            // insert null character as the lexers will look for null characters as EOF
            Buffer[read] = '\0';
            HasReachedEof = true;
            Eof = Buffer + read;
        }
    }
    return opened;
}
/**
 * Saves snt offsets to the given file, as a binary file containing integers.
 * Returns 1 in case of success; 0 otherwise.
 */
int save_snt_offsets(vector_int* snt_offsets,const char* name) {
if (snt_offsets==NULL) {
   fatal_error("Unexpected NULL offsets in save_snt_offsets\n");
}
/* Offsets always come in triples */
if (snt_offsets->nbelems%3 != 0) {
   fatal_error("Invalid offsets in save_snt_offsets\n");
}
U_FILE* f=u_fopen(BINARY,name,U_WRITE);
if (f==NULL) {
   return 0;
}
int written=(int)(fwrite(snt_offsets->tab,sizeof(int),snt_offsets->nbelems,f));
u_fclose(f);
return (written==snt_offsets->nbelems);
}
/**
 * Formats the given printf-style message and appends it to the debug file
 * /tmp/formatter-debug.txt. Used when running as a library where stdout is
 * not available; change this to report differently when a commandline tool.
 */
void jni_report( const char *fmt, ... )
{
    va_list ap;
    UChar message[128];
    va_start( ap, fmt );
    u_vsnprintf( message, 128, fmt, ap );
    UFILE *db = u_fopen("/tmp/formatter-debug.txt","a+",NULL,NULL);
    if ( db != NULL )
    {
        /* Fixed: 'message' is a UChar string, so it must be printed with %S;
         * ICU's u_fprintf interprets %s as a narrow char*, which would read
         * the UTF-16 buffer incorrectly */
        u_fprintf( db, "%S", message );
        u_fclose( db );
    }
    va_end( ap );
}
/**
 * Loads the lines of a text file into a string_hash and returns it, or NULL
 * if the file can not be opened. We arbitrary fix the limit of a line to 4096
 * characters. Each line is splitted into a key and a value, according to a
 * given separator character. An error message will be printed if a line does not
 * contain the separator character, if an empty line is found, or if a line contains
 * an empty key. In case of empty values, the empty string will be used.
 * Note that keys and values can contain characters protected with the \ character,
 * including protected new lines like:
 *
 * 123\
 * =ONE_TWO_THREE_NEW_LINE
 *
 */
struct string_hash* load_key_value_list(const char* name,int mask_encoding_compatibility_input,unichar separator) {
U_FILE* f=u_fopen_existing_versatile_encoding(mask_encoding_compatibility_input,name,U_READ);
if (f==NULL) return NULL;
struct string_hash* hash=new_string_hash();
unichar temp[4096];
unichar key[4096];
unichar value[4096];
/* We build a string with the separator character, used as the stop set
 * when parsing the key part of each line */
unichar stop[2];
stop[0]=separator;
stop[1]='\0';
int code;
/* u_fgets2 reads a logical line, honoring \-protected newlines */
while (EOF!=(code=u_fgets2(temp,f))) {
   if (code==0) {
      error("Empty line\n");
   } else {
      /* First, we try to read a non empty key */
      int pos=0;
      code=parse_string(temp,&pos,key,stop);
      if (code==P_BACKSLASH_AT_END) {
         error("Backslash at end of line:<%S>\n\n",temp);
      } else if (pos==0 &&temp[pos]=='\0') {
         /* Empty line */
         continue;
      } else if (pos==0) {
         /* If the line starts with the separator */
         error("Line with empty key:\n<%S>\n",temp);
      } else {
         /* We jump over the separator */
         pos++;
         /* We initialize 'value' with the empty string in case it is not
          * defined in the file */
         value[0]='\0';
         if(P_BACKSLASH_AT_END==parse_string(temp,&pos,value,P_EMPTY)) {
            error("Backslash at end of line:\n<%S>\n",temp);
         } else {
            /* If we have a valid (key,value) pair, we insert it into the string_hash */
            get_value_index(key,hash,INSERT_IF_NEEDED,value);
         }
      }
   }
}
u_fclose(f);
return hash;
}
/**
 * Loads the given offset file. Returns NULL in case of error.
 */
vector_offset* load_offsets(const VersatileEncodingConfig* vec,const char* name) {
U_FILE* f=u_fopen(vec,name,U_READ);
if (f==NULL) {
   return NULL;
}
vector_offset* offsets=new_vector_offset();
int old_start,old_end,new_start,new_end;
int n_read;
/* Each record is made of 4 integers: old start/end and new start/end */
while (EOF!=(n_read=u_fscanf(f,"%d%d%d%d",&old_start,&old_end,&new_start,&new_end))) {
   if (n_read!=4) {
      fatal_error("Corrupted offset file %s\n",name);
   }
   vector_offset_add(offsets,old_start,old_end,new_start,new_end);
}
u_fclose(f);
return offsets;
}
/**
 * Lists the content of the pack archive 'packFileName' into the file
 * 'filename_out' (written with the given encoding). When 'filename_out' is
 * NULL or empty, the listing runs with no output file. When 'filename_only'
 * is non-zero, only file names are listed.
 * Returns the result of the underlying listing routine.
 */
int do_list_file_in_pack_archive_to_file_with_encoding(const char* packFileName,
        const char* filename_out, Encoding encoding, int filename_only)
{
    U_FILE* fileout = NULL;
    if ((filename_out != NULL) && (*filename_out != '\0')) {
        fileout = u_fopen(encoding, filename_out, U_WRITE);
    }
    int result = do_list_file_in_pack_archive_to_filehandle(packFileName, fileout, filename_only);
    if (fileout != NULL) {
        u_fclose(fileout);
    }
    return result;
}