/**
 * \brief Test function SumComputeFile()
 * \test
 * -# Compute the checksum of a known file using SumComputeFile()
 * -# Check that the function returns the correct checksum
 */
void testSumComputeFile()
{
  Cksum *SumTest;
  FILE *Fin;
  char Fuid[1024];
  int i;

  Filename = "../testdata/test.zip";
  memset(Fuid, 0, sizeof(Fuid));

  Fin = fopen(Filename, "rb");
  if (Fin)
  {
    SumTest = SumComputeFile(Fin);
    if (SumTest)
    {
      for(i=0; i<20; i++) { sprintf(Fuid+0+i*2, "%02X", SumTest->SHA1digest[i]); }
      Fuid[40] = '.';
      for(i=0; i<16; i++) { sprintf(Fuid+41+i*2, "%02X", SumTest->MD5digest[i]); }
      Fuid[73] = '.';
      snprintf(Fuid+74, sizeof(Fuid)-74, "%llu", (long long unsigned int)SumTest->DataLen);
      //printf("%s +++++++++\n",Fuid);
      FO_ASSERT_STRING_EQUAL(Fuid,
        "5CBBD4E0487601E9160A5C887E5C0C1E6541B3DE.5234FC4D5F9786A51B2206B9DEEACA68.825");
      FO_ASSERT_EQUAL((int)SumTest->DataLen, 825);
      free(SumTest);
    }
    fclose(Fin);
  }
}
int main(int argc, char *argv[])
{
  int i;
  char *Result = NULL;
  Cksum *Sum;

  if (argc == 1)
  {
    /* no args? read from stdin */
    Sum = SumComputeFile(stdin);
    if (Sum) { Result = SumToString(Sum); free(Sum); }
    if (Result) { printf("%s\n", Result); free(Result); }
  }

  for(i=1; i<argc; i++)
  {
    if (!strcmp(argv[i], "-"))
    {
      /* read from stdin */
      Sum = SumComputeFile(stdin);
      if (Sum) { Result = SumToString(Sum); free(Sum); }
      if (Result != NULL)
      {
        printf("%s %s\n", Result, argv[i]);
        free(Result);
        Result = NULL;
      }
    }
    else
    {
      /* read from a file */
      RecurseFiles(argv[i]);
    }
  }
  return(0);
} /* main() */
/**
 * \brief Test function SumToString()
 * \test
 * -# Get a result from SumComputeFile()
 * -# Call SumToString() on the result
 * -# Check that the function translates the structure into the expected string
 */
void testSumToString()
{
  Cksum *SumTest;
  FILE *Fin;
  char *Fuid = NULL;

  Filename = "../testdata/test.zip";
  Fin = fopen(Filename, "rb");
  if (Fin)
  {
    SumTest = SumComputeFile(Fin);
    if (SumTest)
    {
      Fuid = SumToString(SumTest);
      FO_ASSERT_STRING_EQUAL(Fuid,
        "5CBBD4E0487601E9160A5C887E5C0C1E6541B3DE.5234FC4D5F9786A51B2206B9DEEACA68.825");
      free(Fuid);
      free(SumTest);
    }
    fclose(Fin);
  }
}
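/*
 * Illustrative sketch only (not part of the test suite): the tests above rely
 * on SumToString() producing "<40 hex SHA1>.<32 hex MD5>.<decimal length>".
 * The struct below is an assumed layout inferred from how SHA1digest,
 * MD5digest and DataLen are used in this file; the real Cksum definition may
 * differ.  FormatCksum() is a hypothetical helper, not a FOSSology API.
 * With the 825-byte ../testdata/test.zip used above it would yield the
 * "5CBB...3DE.5234...A68.825" value the asserts expect.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct
{
  uint8_t SHA1digest[20];       /* raw SHA-1 bytes -> 40 hex characters */
  uint8_t MD5digest[16];        /* raw MD5 bytes   -> 32 hex characters */
  unsigned long long DataLen;   /* number of bytes checksummed          */
} CksumSketch;

/* Write "SHA1.MD5.LEN" into Out (needs at least 40+1+32+1+20+1 bytes). */
void FormatCksum(const CksumSketch *Sum, char *Out, size_t OutLen)
{
  int i, Off = 0;
  for(i=0; i<20; i++) Off += snprintf(Out+Off, OutLen-Off, "%02X", Sum->SHA1digest[i]);
  Off += snprintf(Out+Off, OutLen-Off, ".");
  for(i=0; i<16; i++) Off += snprintf(Out+Off, OutLen-Off, "%02X", Sum->MD5digest[i]);
  snprintf(Out+Off, OutLen-Off, ".%llu", Sum->DataLen);
}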
int main(int argc, char *argv[])
{
  int Pid;
  int c;
  int rvExist1=0, rvExist2=0;
  PGresult *result;
  char *NewDir = ".";
  char *AgentName = "ununpack";
  char *AgentARSName = "ununpack_ars";
  char *agent_desc = "Unpacks archives (iso, tar, etc)";
  int Recurse = 0;
  int ars_pk = 0;
  int user_pk = 0;
  long Pfile_size = 0;
  char *ListOutName = NULL;
  char *Fname = NULL;
  char *FnameCheck = NULL;
  char *COMMIT_HASH;
  char *VERSION;
  char agent_rev[PATH_MAX];
  struct stat Stat;

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);

  while((c = getopt(argc,argv,"ACc:d:FfHhL:m:PQiqRr:T:t:U:VvXx")) != -1)
  {
    switch(c)
    {
      case 'A': SetContainerArtifact=0; break;
      case 'C': ForceContinue=1; break;
      case 'c': break; /* handled by fo_scheduler_connect() */
      case 'd':
        /* if there is a %U in the path, substitute a unique ID */
        NewDir = PathCheck(optarg);
        break;
      case 'F': UseRepository=1; break;
      case 'f': ForceDuplicate=1; break;
      case 'L': ListOutName=optarg; break;
      case 'm':
        MaxThread = atoi(optarg);
        if (MaxThread < 1) MaxThread=1;
        break;
      case 'P': PruneFiles=1; break;
      case 'R': Recurse=-1; break;
      case 'r': Recurse=atoi(optarg); break;
      case 'i':
        if (!IsExe("dpkg-source",Quiet))
          LOG_WARNING("dpkg-source is not available on this system. This means that debian source packages will NOT be unpacked.");
        SafeExit(0);
        break; /* never reached */
      case 'Q':
        UseRepository=1;
        user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */
        /* Get the upload_pk from the scheduler */
        if ((Upload_Pk = fo_scheduler_next()) == NULL) SafeExit(0);
        break;
      case 'q': Quiet=1; break;
      case 'T':
        memset(REP_GOLD,0,sizeof(REP_GOLD));
        strncpy(REP_GOLD,optarg,sizeof(REP_GOLD)-1);
        break;
      case 't':
        memset(REP_FILES,0,sizeof(REP_FILES));
        strncpy(REP_FILES,optarg,sizeof(REP_FILES)-1);
        break;
      case 'U':
        UseRepository = 1;
        Recurse = -1;
        Upload_Pk = optarg;
        break;
      case 'V': printf("%s", BuildVersion); SafeExit(0);
      case 'v': Verbose++; break;
      case 'X': UnlinkSource=1; break;
      case 'x': UnlinkAll=1; break;
      default:
        Usage(argv[0], BuildVersion);
        SafeExit(25);
    }
  }

  /* Open DB and Initialize CMD table */
  if (UseRepository)
  {
    /* Check Permissions */
    if (GetUploadPerm(pgConn, atoi(Upload_Pk), user_pk) < PERM_WRITE)
    {
      LOG_ERROR("You have no update permissions on upload %s", Upload_Pk);
      SafeExit(100);
    }

    COMMIT_HASH = fo_sysconfig(AgentName, "COMMIT_HASH");
    VERSION = fo_sysconfig(AgentName, "VERSION");
    sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
    /* Get the unpack agent key */
    agent_pk = fo_GetAgentKey(pgConn, AgentName, atoi(Upload_Pk), agent_rev, agent_desc);

    InitCmd();

    /* Make sure ars table exists */
    if (!fo_CreateARSTable(pgConn, AgentARSName)) SafeExit(0);

    /* Has this user previously unpacked this upload_pk successfully?
     * In this case we are done.  No new ars record is needed since no
     * processing is initiated.  The unpack version is ignored.
     */
    snprintf(SQL,MAXSQL,
        "SELECT ars_pk from %s where upload_fk='%s' and ars_success=TRUE",
        AgentARSName, Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(101);

    if (PQntuples(result) > 0) /* if there is a value */
    {
      PQclear(result);
      LOG_WARNING("Upload_pk %s has already been unpacked. No further action required", Upload_Pk)
      SafeExit(0);
    }
    PQclear(result);

    /* write the unpack_ars start record */
    ars_pk = fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 0);

    /* Get Pfile path and Pfile_Pk from Upload_Pk */
    snprintf(SQL,MAXSQL,
        "SELECT pfile.pfile_sha1 || '.' || pfile.pfile_md5 || '.' "
        "|| pfile.pfile_size AS pfile, pfile_fk, pfile_size "
        "FROM upload INNER JOIN pfile ON upload.pfile_fk = pfile.pfile_pk "
        "WHERE upload.upload_pk = '%s'", Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(102);
    if (PQntuples(result) > 0) /* if there is a value */
    {
      Pfile = strdup(PQgetvalue(result,0,0));
      Pfile_Pk = strdup(PQgetvalue(result,0,1));
      Pfile_size = atol(PQgetvalue(result, 0, 2));
      if (Pfile_size == 0)
      {
        PQclear(result);
        LOG_WARNING("Uploaded file (Upload_pk %s) is zero length. There is nothing to unpack.", Upload_Pk)
        SafeExit(0);
      }
      PQclear(result);
    }

    // Determine if uploadtree records should go into a separate table.
    // If the input file size is > 500MB, then create a separate uploadtree_{upload_pk} table
    // that inherits from the master uploadtree table.
    // Save uploadtree_tablename; it will get written to upload.uploadtree_tablename later.
    // (A standalone sketch of this DDL construction follows main() below.)
    if (Pfile_size > 500000000)
    {
      sprintf(uploadtree_tablename, "uploadtree_%s", Upload_Pk);
      if (!fo_tableExists(pgConn, uploadtree_tablename))
      {
        snprintf(SQL,MAXSQL,
            "CREATE TABLE %s (LIKE uploadtree INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES); "
            "ALTER TABLE %s ADD CONSTRAINT %s CHECK (upload_fk=%s); "
            "ALTER TABLE %s INHERIT uploadtree",
            uploadtree_tablename, uploadtree_tablename, uploadtree_tablename, Upload_Pk, uploadtree_tablename);
        PQsetNoticeProcessor(pgConn, SQLNoticeProcessor, SQL);  // ignore notice about implicit primary key index creation
        result = PQexec(pgConn, SQL);
        // Ignore postgres notice about creating an implicit index
        if (PQresultStatus(result) != PGRES_NONFATAL_ERROR)
          if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(103);
        PQclear(result);
      }
    }
    else
      strcpy(uploadtree_tablename, "uploadtree_a");
  }

  CheckCommands(Quiet);
  if (NewDir) MkDir(NewDir);
  if (Verbose) { fclose(stderr); stderr=stdout; } /* don't interlace! */

  if (ListOutName != NULL)
  {
    if ((ListOutName[0]=='-') && (ListOutName[1]=='\0'))
      ListOutFile = stdout;
    else
      ListOutFile = fopen(ListOutName,"w");
    if (!ListOutFile)
    {
      LOG_ERROR("pfile %s Unable to write to %s\n",Pfile_Pk,ListOutName)
      SafeExit(104);
    }
    else
    {
      /* Start the file */
      fputs("<xml tool=\"ununpack\" ",ListOutFile);
      fputs("version=\"",ListOutFile);
      fputs(Version,ListOutFile);
      fputs("\" ",ListOutFile);
      fputs("compiled_date=\"",ListOutFile);
      fputs(__DATE__,ListOutFile);
      fputs(" ",ListOutFile);
      fputs(__TIME__,ListOutFile);
      fputs("\"",ListOutFile);
      fputs(">\n",ListOutFile);
    }
    /* Problem: when processing in parallel, the XML may be generated out of order.
       Solution: when using XML, only use one thread. */
    MaxThread = 1;
  }

  // Set ReunpackSwitch if the uploadtree records are missing from the database.
  if (!ReunpackSwitch && UseRepository)
  {
    snprintf(SQL,MAXSQL,"SELECT uploadtree_pk FROM uploadtree WHERE upload_fk=%s limit 1;",Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(105);
    if (PQntuples(result) == 0) ReunpackSwitch=1;
    PQclear(result);
  }

  /*** process files from the command line ***/
  for( ; optind<argc; optind++)
  {
    CksumFile *CF=NULL;
    Cksum *Sum;
    int i;

    if (Fname) { free(Fname); Fname=NULL; }

    if (ListOutName != NULL)
    {
      fprintf(ListOutFile,"<source source=\"%s\" ",argv[optind]);
      if (UseRepository && !fo_RepExist(REP_FILES,argv[optind]))
      {
        /* make sure the source exists in the src repository */
        if (fo_RepImport(argv[optind],REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",argv[optind],argv[optind])
          SafeExit(106);
        }
      }
    }

    if (UseRepository)
    {
      if (fo_RepExist(REP_FILES,argv[optind]))
      {
        Fname = fo_RepMkPath(REP_FILES,argv[optind]);
      }
      else if (fo_RepExist(REP_GOLD,argv[optind]))
      {
        Fname = fo_RepMkPath(REP_GOLD,argv[optind]);
        if (fo_RepImport(Fname,REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,argv[optind])
          SafeExit(107);
        }
      }

      if (Fname)
      {
        FnameCheck = Fname;
        CF = SumOpenFile(Fname);
      }
      else
      {
        LOG_ERROR("NO file unpacked. File %s does not exist either in GOLD or FILES", Pfile);
        SafeExit(108);
      }
      /* else: Fname is NULL and CF is NULL */
    }
    else
    {
      FnameCheck = argv[optind];
      CF = SumOpenFile(argv[optind]);
    }

    /* Check the file to unpack.  Does it exist?  Is it zero length? */
    if (stat(FnameCheck,&Stat))
    {
      LOG_ERROR("File to unpack is unavailable: %s, error: %s", FnameCheck, strerror(errno));
      SafeExit(109);
    }
    else if (Stat.st_size < 1)
    {
      LOG_WARNING("File to unpack is empty: %s", FnameCheck);
      SafeExit(110);
    }

    if (ListOutFile)
    {
      if (CF)
      {
        Sum = SumComputeBuff(CF);
        SumCloseFile(CF);
        if (Sum)
        {
          fputs("fuid=\"",ListOutFile);
          for(i=0; i<20; i++) { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
          fputs(".",ListOutFile);
          for(i=0; i<16; i++) { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
          fputs(".",ListOutFile);
          fprintf(ListOutFile,"%llu",(long long unsigned int)Sum->DataLen);
          fputs("\" ",ListOutFile);
          free(Sum);
        } /* if Sum */
      } /* if CF */
      else /* file too large to mmap (probably) */
      {
        FILE *Fin;
        Fin = fopen(argv[optind],"rb");
        if (Fin)
        {
          Sum = SumComputeFile(Fin);
          if (Sum)
          {
            fputs("fuid=\"",ListOutFile);
            for(i=0; i<20; i++) { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
            fputs(".",ListOutFile);
            for(i=0; i<16; i++) { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
            fputs(".",ListOutFile);
            fprintf(ListOutFile,"%llu",(long long unsigned int)Sum->DataLen);
            fputs("\" ",ListOutFile);
            free(Sum);
          }
          fclose(Fin);
        }
      } /* else no CF */
      fprintf(ListOutFile,">\n"); /* end source XML */
    }

    if (Fname) TraverseStart(Fname,"called by main via args",NewDir,Recurse);
    else TraverseStart(argv[optind],"called by main",NewDir,Recurse);
    if (ListOutName != NULL) fprintf(ListOutFile,"</source>\n");
  } /* end for */

  /* free memory */
  if (Fname) { free(Fname); Fname=NULL; }

  /* process pfile from the scheduler */
  if (Pfile)
  {
    if (0 == (rvExist1 = fo_RepExist2(REP_FILES,Pfile)))
    {
      Fname = fo_RepMkPath(REP_FILES,Pfile);
    }
    else if (0 == (rvExist2 = fo_RepExist2(REP_GOLD,Pfile)))
    {
      Fname = fo_RepMkPath(REP_GOLD,Pfile);
      if (fo_RepImport(Fname,REP_FILES,Pfile,1) != 0)
      {
        LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,Pfile)
        SafeExit(111);
      }
    }
    if (Fname)
    {
      TraverseStart(Fname,"called by main via env",NewDir,Recurse);
      free(Fname);
      Fname = NULL;
    }
    else
    {
      LOG_ERROR("NO file unpacked!");
      if (rvExist1 > 0)
      {
        Fname = fo_RepMkPath(REP_FILES, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist1), Fname);
      }
      if (rvExist2 > 0)
      {
        Fname = fo_RepMkPath(REP_GOLD, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist2), Fname);
      }
      SafeExit(112);
    }
  }

  /* recurse on all the children */
  if (Thread > 0)
  do
  {
    Pid = ParentWait();
    Thread--;
    if (Pid >= 0)
    {
      if (!Queue[Pid].ChildEnd)
      {
        /* copy over data */
        if (Recurse > 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse-1,&Queue[Pid].PI);
        else if (Recurse < 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse,&Queue[Pid].PI);
      }
    }
  } while(Pid >= 0);

  if (MagicCookie) magic_close(MagicCookie);

  if (ListOutFile)
  {
    fprintf(ListOutFile,"<summary files_regular=\"%d\" files_compressed=\"%d\" artifacts=\"%d\" directories=\"%d\" containers=\"%d\" />\n",
        TotalFiles,TotalCompressedFiles,TotalArtifacts,
        TotalDirectories,TotalContainers);
    fputs("</xml>\n",ListOutFile);
  }

  if (pgConn)
  {
    /* If it completes, mark it! */
    if (Upload_Pk)
    {
      snprintf(SQL,MAXSQL,
          "UPDATE upload SET upload_mode = (upload_mode | (1<<5)), uploadtree_tablename='%s' WHERE upload_pk = '%s';",
          uploadtree_tablename, Upload_Pk);
      result = PQexec(pgConn, SQL); /* UPDATE upload */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(113);
      PQclear(result);

      snprintf(SQL,MAXSQL,
          "UPDATE %s SET realparent = getItemParent(uploadtree_pk) WHERE upload_fk = '%s'",
          uploadtree_tablename, Upload_Pk);
      result = PQexec(pgConn, SQL); /* UPDATE uploadtree */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(114);
      PQclear(result);
    }

    if (ars_pk) fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 1);
  }

  if (ListOutFile && (ListOutFile != stdout))
  {
    fclose(ListOutFile);
  }

  if (UnlinkAll && MaxThread > 1)
  {
    /* Delete temporary files */
    if (strcmp(NewDir, ".")) RemoveDir(NewDir);
  }

  SafeExit(0);
  return(0);  // never executed, but makes the compiler happy
} /* main() */
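/*
 * Standalone sketch (not part of ununpack): shows how the per-upload
 * uploadtree DDL built inside main() above expands for a concrete upload_pk.
 * The "uploadtree_<upload_pk>" table/constraint naming and the 500MB
 * threshold come from the code above; the helper name, the example
 * upload_pk "1234", and the buffer handling are hypothetical.
 */
#include <stdio.h>

#define UPLOADTREE_SPLIT_THRESHOLD 500000000L  /* bytes; same threshold main() checks */

/* Build the child-table DDL for one upload; returns the snprintf() length. */
static int BuildUploadtreeDDL(char *buf, size_t buflen, const char *upload_pk)
{
  return snprintf(buf, buflen,
      "CREATE TABLE uploadtree_%s (LIKE uploadtree INCLUDING DEFAULTS "
      "INCLUDING CONSTRAINTS INCLUDING INDEXES); "
      "ALTER TABLE uploadtree_%s ADD CONSTRAINT uploadtree_%s CHECK (upload_fk=%s); "
      "ALTER TABLE uploadtree_%s INHERIT uploadtree",
      upload_pk, upload_pk, upload_pk, upload_pk, upload_pk);
}

int main(void)
{
  char ddl[1024];
  BuildUploadtreeDDL(ddl, sizeof(ddl), "1234");  /* hypothetical upload_pk */
  puts(ddl);  /* prints the three statements ununpack would send to postgres */
  return 0;
}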
/*********************************************************
 DBLoadGold(): Insert a file into the database and repository.
 (This mimics the old webgoldimport.)
 *********************************************************/
void DBLoadGold()
{
  Cksum *Sum;
  char *Unique = NULL;
  char *SHA1, *MD5, *Len;
  char SQL[MAXCMD];
  long PfileKey;
  char *Path;
  FILE *Fin;
  int rc;

  if (Debug) printf("Processing %s\n",GlobalTempFile);
  Fin = fopen(GlobalTempFile,"rb");
  if (!Fin)
  {
    printf("ERROR upload %ld Unable to open temp file.\n",GlobalUploadKey);
    printf("LOG upload %ld Unable to open temp file %s from %s\n",
        GlobalUploadKey,GlobalTempFile,GlobalURL);
    fflush(stdout);
    DBclose(DB);
    exit(1);
  }

  Sum = SumComputeFile(Fin);
  fclose(Fin);

  if (ForceGroup > 0) { chown(GlobalTempFile,-1,ForceGroup); }

  if (!Sum)
  {
    printf("ERROR upload %ld Unable to compute checksum.\n",GlobalUploadKey);
    printf("LOG upload %ld Unable to compute checksum for %s from %s\n",
        GlobalUploadKey,GlobalTempFile,GlobalURL);
    fflush(stdout);
    DBclose(DB);
    exit(2);
  }

  if (Sum->DataLen <= 0)
  {
    printf("ERROR upload %ld No bytes downloaded from %s.\n",GlobalUploadKey,GlobalURL);
    printf("LOG upload %ld No bytes downloaded from %s to %s.\n",
        GlobalUploadKey,GlobalURL,GlobalTempFile);
    fflush(stdout);
    DBclose(DB);
    exit(3);
  }

  Unique = SumToString(Sum);
  if (Debug) printf("Unique %s\n",Unique);

  if (GlobalImportGold)
  {
    if (Debug) printf("Import Gold %s\n",Unique);
    rc = RepImport(GlobalTempFile,"gold",Unique,1);
    if (rc != 0)
    {
      printf("ERROR upload %ld Failed to import file into the repository (RepImport=%d).\n",GlobalUploadKey,rc);
      printf("LOG upload %ld Failed to import %s from %s into gold %s\n",
          GlobalUploadKey,GlobalTempFile,GlobalURL,Unique);
      fflush(stdout);
      DBclose(DB);
      exit(4);
    }
    /* Put the file in the "files" repository too */
    Path = RepMkPath("gold",Unique);
    if (ForceGroup >= 0) { chown(Path,-1,ForceGroup); }
  } /* if GlobalImportGold */
  else /* if !GlobalImportGold */
  {
    Path = GlobalTempFile;
  } /* else if !GlobalImportGold */

  if (Debug) printf("Path is %s\n",Path);
  if (!Path)
  {
    printf("ERROR upload %ld Failed to determine repository location.\n",GlobalUploadKey);
    printf("LOG upload %ld Failed to determine repository location for %s in gold\n",
        GlobalUploadKey,Unique);
    fflush(stdout);
    DBclose(DB);
    exit(5);
  }

  if (Debug) printf("Import files %s\n",Path);
  if (RepImport(Path,"files",Unique,1) != 0)
  {
    printf("ERROR upload %ld Failed to import file into the repository.\n",GlobalUploadKey);
    printf("LOG upload %ld Failed to import %s from %s into files\n",
        GlobalUploadKey,Unique,Path);
    fflush(stdout);
    DBclose(DB);
    exit(6);
  }
  if (ForceGroup >= 0) { chown(Path,-1,ForceGroup); }
  if (Path != GlobalTempFile) free(Path);

  /* Now update the DB */
  /** Break out the sha1, md5, len components **/
  SHA1 = Unique;
  MD5 = Unique+41;      /* 40 for sha1 + 1 for '.' */
  Len = Unique+41+33;   /* 32 for md5 + 1 for '.'  */
  /** Set the pfile **/
  memset(SQL,'\0',MAXCMD);
  snprintf(SQL,MAXCMD-1,
      "SELECT pfile_pk FROM pfile WHERE pfile_sha1 = '%.40s' AND pfile_md5 = '%.32s' AND pfile_size = %s;",
      SHA1,MD5,Len);
  if (DBaccess(DB,SQL) < 0)
  {
    printf("ERROR upload %ld Unable to select from the database\n",GlobalUploadKey);
    printf("LOG upload %ld Unable to select from the database: %s\n",GlobalUploadKey,SQL);
    fflush(stdout);
    DBclose(DB);
    exit(7);
  }

  /* See if the pfile needs to be added */
  if (DBdatasize(DB) <= 0)
  {
    /* Insert it */
    memset(SQL,'\0',MAXCMD);
    snprintf(SQL,MAXCMD-1,
        "INSERT INTO pfile (pfile_sha1, pfile_md5, pfile_size) VALUES ('%.40s','%.32s',%s);",
        SHA1,MD5,Len);
    if (DBaccess(DB,SQL) < 0)
    {
      printf("ERROR upload %ld Unable to insert into the database\n",GlobalUploadKey);
      printf("LOG upload %ld Unable to insert into the database: %s\n",GlobalUploadKey,SQL);
      fflush(stdout);
      DBclose(DB);
      exit(8);
    }
    DBaccess(DB,"SELECT currval('pfile_pfile_pk_seq');");
  }
  PfileKey = atol(DBgetvalue(DB,0,0));
  if (Debug) printf("pfile_pk = %ld\n",PfileKey);

  /* Update the DB so the pfile is linked to the upload record */
  DBaccess(DB,"BEGIN;");
  memset(SQL,'\0',MAXCMD);
  snprintf(SQL,MAXCMD-1,"SELECT * FROM upload WHERE upload_pk=%ld FOR UPDATE;",GlobalUploadKey);
  DBaccess(DB,SQL);
  memset(SQL,'\0',MAXCMD);
  snprintf(SQL,MAXCMD-1,"UPDATE upload SET pfile_fk=%ld WHERE upload_pk=%ld;",
      PfileKey,GlobalUploadKey);
  if (Debug) printf("SQL=%s\n",SQL);
  if (DBaccess(DB,SQL) < 0)
  {
    printf("ERROR upload %ld Unable to update the database\n",GlobalUploadKey);
    printf("LOG upload %ld Unable to update the database: %s\n",GlobalUploadKey,SQL);
    fflush(stdout);
    DBclose(DB);
    exit(9);
  }
  DBaccess(DB,"COMMIT;");

  /* Clean up */
  free(Unique);
  free(Sum);
} /* DBLoadGold() */
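/*
 * Illustrative sketch (not part of wget_agent): DBLoadGold() above slices the
 * "sha1.md5.len" string from SumToString() at fixed offsets -- SHA1 at 0
 * (40 hex chars), MD5 at 41 (32 hex chars), length at 74 -- and relies on the
 * '%.40s'/'%.32s' precision in its SQL to bound each piece.  The sketch below
 * instead NUL-terminates the pieces so they can be printed separately.
 * SplitUnique() is a hypothetical helper; the sample string is the test.zip
 * checksum used by the unit tests above.
 */
#include <stdio.h>

/* Split a writable "sha1.md5.len" string in place by replacing the dots. */
static void SplitUnique(char *Unique, char **SHA1, char **MD5, char **Len)
{
  *SHA1 = Unique;          /* 40 hex characters       */
  Unique[40] = '\0';
  *MD5 = Unique + 41;      /* 40 for sha1 + 1 for '.' */
  Unique[40+1+32] = '\0';
  *Len = Unique + 41 + 33; /* 32 for md5 + 1 for '.'  */
}

int main(void)
{
  char Unique[] =
    "5CBBD4E0487601E9160A5C887E5C0C1E6541B3DE.5234FC4D5F9786A51B2206B9DEEACA68.825";
  char *SHA1, *MD5, *Len;
  SplitUnique(Unique, &SHA1, &MD5, &Len);
  printf("sha1=%s md5=%s size=%s\n", SHA1, MD5, Len);
  return 0;
}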
/**********************************************
 RecurseFiles(): Process all files in all directories.
 **********************************************/
void RecurseFiles(char *S)
{
  char NewS[FILENAME_MAX+1];
  DIR *Dir;
  struct dirent *Entry;
  struct stat64 Stat;
  CksumFile *CF;
  char *Result=NULL;
  Cksum *Sum;

  Dir = opendir(S);
  if (Dir == NULL)
  {
    Result=NULL;
    /* it's a single file -- compute checksum */
    CF = SumOpenFile(S);
    if (CF == NULL)
    {
      FILE *Fin;
      Fin = fopen64(S,"rb");
      if (!Fin)
      {
        perror("Huh?");
        fprintf(stderr,"ERROR: cannot open file \"%s\".\n",S);
      }
      else
      {
        Sum = SumComputeFile(Fin);
        if (Sum) { Result=SumToString(Sum); free(Sum); }
        fclose(Fin);
      }
    }
    else
    {
      Sum = SumComputeBuff(CF);
      if (Sum) { Result=SumToString(Sum); free(Sum); }
      SumCloseFile(CF);
    }
    if (Result != NULL)
    {
      printf("%s %s\n",Result,S);
      free(Result);
      Result=NULL;
    }
    return;
  }

  Entry = readdir(Dir);
  while(Entry != NULL)
  {
    if (!strcmp(Entry->d_name,".")) goto skip;
    if (!strcmp(Entry->d_name,"..")) goto skip;
    memset(NewS,'\0',sizeof(NewS));
    strcpy(NewS,S);
    strcat(NewS,"/");
    strcat(NewS,Entry->d_name);
    lstat64(NewS,&Stat);
    Result=NULL;
    if (S_ISDIR(Stat.st_mode)) RecurseFiles(NewS);
    else
    {
      /* compute checksum */
      CF = SumOpenFile(NewS);
      if (CF == NULL)
      {
        FILE *Fin;
        Fin = fopen64(NewS,"rb");
        if (!Fin) fprintf(stderr,"ERROR: Cannot open file \"%s\".\n",NewS);
        else
        {
          Sum = SumComputeFile(Fin);
          if (Sum) { Result=SumToString(Sum); free(Sum); }
          fclose(Fin);
        }
      }
      else
      {
        Sum = SumComputeBuff(CF);
        if (Sum) { Result=SumToString(Sum); free(Sum); }
        SumCloseFile(CF);
      }
      if (Result != NULL)
      {
        printf("%s %s\n",Result,NewS);
        free(Result);
        Result=NULL;
      }
    }
skip:
    Entry = readdir(Dir);
  }
  closedir(Dir);
} /* RecurseFiles() */
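/*
 * Illustrative refactoring sketch (not part of the current code): the
 * "try SumOpenFile()/SumComputeBuff(), fall back to fopen64()/SumComputeFile()
 * for files that cannot be mapped" pattern appears twice in RecurseFiles()
 * and again in the ununpack main() above.  ChecksumPath() is a hypothetical
 * helper built only from the checksum API already used in this file (it
 * assumes the same declarations this file already includes); it returns a
 * malloc'd "sha1.md5.len" string, or NULL on failure.
 */
static char *ChecksumPath(char *Path)
{
  CksumFile *CF;
  Cksum *Sum = NULL;
  char *Result = NULL;
  FILE *Fin;

  CF = SumOpenFile(Path);
  if (CF)
  {
    Sum = SumComputeBuff(CF);   /* whole file is available in memory */
    SumCloseFile(CF);
  }
  else
  {
    Fin = fopen64(Path,"rb");   /* too large to map (probably): stream it */
    if (!Fin) return(NULL);
    Sum = SumComputeFile(Fin);
    fclose(Fin);
  }
  if (Sum)
  {
    Result = SumToString(Sum);
    free(Sum);
  }
  return(Result);
}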