/**
 * \brief Process jobs handed to the delagent by the scheduler.
 *
 * -# Read each job parameter string from the scheduler via fo_scheduler_next().
 * -# Look up the permission level of the user who queued the job.
 * -# Hand the parameter string plus user id/permission to
 *    readAndProcessParameter() for parsing and execution.
 *
 * Exits the process (via exitNow()) on a failed/empty permission query or when
 * readAndProcessParameter() reports a non-zero status.
 *
 * \see fo_scheduler_next()
 * \see readAndProcessParameter()
 */
void doSchedulerTasks()
{
  char *Parm = NULL;
  char SQL[MAXSQL];
  PGresult *result;
  int userId = -1;
  int userPerm = -1;

  /* Loop until the scheduler has no more jobs for us. */
  while(fo_scheduler_next())
  {
    Parm = fo_scheduler_current();
    userId = fo_scheduler_userID();

    /* Get the permission level of the user who queued this job. */
    snprintf(SQL,MAXSQL,"SELECT user_perm FROM users WHERE user_pk='%d';", userId);
    result = PQexec(pgConn, SQL);
    /* Bail out if the query failed or the user does not exist. */
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__) || !PQntuples(result))
    {
      exitNow(0);
    }
    userPerm = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    int returnCode = readAndProcessParameter(Parm, userId, userPerm);
    if (returnCode != 0)
    {
      /* Log level is too high, but the scheduler expects a FATAL log message
       * before the agent exits with an error. */
      LOG_FATAL("Due to permission problems, the delagent was not able to list or delete the requested objects or they did not exist.");
      exitNow(returnCode);
    }
  }
}
/**
 * \brief Test-agent entry point: connect to the scheduler and verify that no
 *        job is queued.
 *
 * Disconnects with status 0 when fo_scheduler_next() yields NULL (the expected
 * empty job stream), and with status -1 when a job unexpectedly arrives.
 *
 * \param argc command-line argument count (consumed by fo_scheduler_connect)
 * \param argv command-line argument vector
 * \return 0 always (the exit status is conveyed via fo_scheduler_disconnect)
 */
int main(int argc, char** argv)
{
  int status;

  fo_scheduler_connect(&argc, argv, NULL);

  /* NULL means the scheduler had nothing for us — that is the success case. */
  status = (fo_scheduler_next() == NULL) ? 0 : -1;
  fo_scheduler_disconnect(status);

  return 0;
}
/**
 * \brief Test-agent entry point: connect to the scheduler and drain the job
 *        stream.
 *
 * BUG FIX: the original looped with while(1), ignoring fo_scheduler_next()'s
 * return value. fo_scheduler_next() returns NULL once the scheduler closes
 * the job stream, so the original never terminated and the trailing
 * fo_scheduler_disconnect(0) was unreachable. Loop until NULL instead.
 *
 * \param argc command-line argument count (consumed by fo_scheduler_connect)
 * \param argv command-line argument vector
 * \return 0 always (exit status is conveyed via fo_scheduler_disconnect)
 */
int main(int argc, char** argv)
{
  fo_scheduler_connect(&argc, argv, NULL);

  /* Consume every job; this agent only exercises the protocol, so the job
   * content itself is ignored. */
  while(fo_scheduler_next() != NULL)
  {
    /* intentionally empty */
  }

  fo_scheduler_disconnect(0);
  return 0;
}
int main (int argc, char *argv[]) { int nonoptargs; int c, retCode; regex_t regex; char regexStr[1024]; /* string storage for the regex expression */ bool regexSet = false; char fileName[1000]; FILE *scanFilePtr; char uploadNum[10]; int scannedCount = 0; int user_pk; long UploadPK=-1; char *COMMIT_HASH; char *VERSION; char agent_rev[myBUFSIZ]; /* connect to scheduler. Noop if not run from scheduler. */ fo_scheduler_connect(&argc, argv, &pgConn); /* Version reporting. */ COMMIT_HASH = fo_sysconfig("regexscan", "COMMIT_HASH"); VERSION = fo_sysconfig("regexscan", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH); #ifdef REGEX_DEBUG fprintf(stdout, "regexscan reports version info as '%s.%s'.\n", VERSION, COMMIT_HASH); #endif /* Process command-line */ while((c = getopt(argc,argv,"chir:v")) != -1) { switch(c) { case 'c': break; /* handled by fo_scheduler_connect() */ case 'i': PQfinish(pgConn); return(0); case 'r': sprintf(regexStr, "%s", optarg); regexSet = true; break; case 'v': agent_verbose++; break; case 'h': default: Usage(argv[0]); fflush(stdout); PQfinish(pgConn); exit(-1); } } /* Sanity check for regex value required here. */ if (!regexSet) { fprintf (stderr, "No regex value has been requested!\n"); PQfinish(pgConn); fo_scheduler_disconnect(0); return 1; } /* process filename after switches. How many non-option arguments are there ? 
*/ nonoptargs = argc - optind; /* total argument count minus the option count */ if (nonoptargs == 0) { /* Assume it was a scheduler call */ user_pk = fo_scheduler_userID(); while(fo_scheduler_next()) { UploadPK = atol(fo_scheduler_current()); printf("UploadPK is: %ld\n", UploadPK); sprintf(uploadNum, "%ld", UploadPK); scannedCount = regexScanUpload(uploadNum, regexStr); if (scannedCount == 0) { fprintf(stderr, "Failed to successfully scan: upload - %s!\n", uploadNum); } } } else { /* File access initialization - For Stage 3 use first arg as fileName */ sprintf(fileName, "%s", argv[optind]); /* Grab first non-switch argument as filename */ scanFilePtr = fopen(fileName, "r"); if (!scanFilePtr) { fprintf(stderr, "ERROR: Unable to open '%s'\n", fileName); PQfinish(pgConn); fo_scheduler_disconnect(0); } /* Compile the regex for improved performance */ retCode = regcomp(®ex, regexStr, REG_ICASE+REG_EXTENDED); if (retCode) { fprintf(stderr, "regex %s failed to compile\n", regexStr); PQfinish(pgConn); fo_scheduler_disconnect(0); } /* Now call the function that scans a file for a regex */ retCode = regexScan(®ex, (char *)regexStr, scanFilePtr, (char *)fileName); // retCode = regexScan(uploadNum, regexStr); if (retCode != 0) { fprintf(stderr, "Failed to successfully scan: %s!\n", fileName); } } PQfinish(pgConn); fo_scheduler_disconnect(0); return 0; } /* main() */
int main(int argc, char **argv) { char *agentDesc = "Bucket agent"; int cmdopt; int verbose = 0; int ReadFromStdin = 1; int head_uploadtree_pk = 0; PGconn *pgConn; PGresult *topresult; PGresult *result; char sqlbuf[512]; char *Delims = ",= \t\n\r"; char *token, *saveptr; int agent_pk = 0; int nomos_agent_pk = 0; int bucketpool_pk = 0; int ars_pk = 0; int readnum = 0; int rv; int hasPrules; int user_pk = 0; char *bucketpool_name; char *COMMIT_HASH; char *VERSION; char *uploadtree_tablename; char agent_rev[myBUFSIZ]; int rerun = 0; // int *bucketList; pbucketdef_t bucketDefArray = 0; pbucketdef_t tmpbucketDefArray = 0; cacheroot_t cacheroot; uploadtree_t uploadtree; uploadtree.upload_fk = 0; /* connect to the scheduler */ fo_scheduler_connect(&argc, argv, &pgConn); user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */ /* command line options */ while ((cmdopt = getopt(argc, argv, "rin:p:t:u:vc:hV")) != -1) { switch (cmdopt) { case 'i': /* "Initialize" */ PQfinish(pgConn); exit(0); case 'n': /* bucketpool_name */ ReadFromStdin = 0; bucketpool_name = optarg; /* find the highest rev active bucketpool_pk */ if (!bucketpool_pk) { bucketpool_pk = getBucketpool_pk(pgConn, bucketpool_name); if (!bucketpool_pk) printf("%s is not an active bucketpool name.\n", bucketpool_name); } break; case 'p': /* bucketpool_pk */ ReadFromStdin = 0; bucketpool_pk = atoi(optarg); /* validate bucketpool_pk */ sprintf(sqlbuf, "select bucketpool_pk from bucketpool where bucketpool_pk=%d and active='Y'", bucketpool_pk); bucketpool_pk = validate_pk(pgConn, sqlbuf); if (!bucketpool_pk) printf("%d is not an active bucketpool_pk.\n", atoi(optarg)); break; case 't': /* uploadtree_pk */ ReadFromStdin = 0; if (uploadtree.upload_fk) break; head_uploadtree_pk = atoi(optarg); /* validate bucketpool_pk */ sprintf(sqlbuf, "select uploadtree_pk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk); head_uploadtree_pk = validate_pk(pgConn, sqlbuf); if (!head_uploadtree_pk) 
printf("%d is not an active uploadtree_pk.\n", atoi(optarg)); break; case 'u': /* upload_pk */ ReadFromStdin = 0; if (!head_uploadtree_pk) { uploadtree.upload_fk = atoi(optarg); /* validate upload_pk and get uploadtree_pk */ sprintf(sqlbuf, "select upload_pk from upload where upload_pk=%d", uploadtree.upload_fk); uploadtree.upload_fk = validate_pk(pgConn, sqlbuf); if (!uploadtree.upload_fk) printf("%d is not an valid upload_pk.\n", atoi(optarg)); else { sprintf(sqlbuf, "select uploadtree_pk from uploadtree where upload_fk=%d and parent is null", uploadtree.upload_fk); head_uploadtree_pk = validate_pk(pgConn, sqlbuf); } } break; case 'v': /* verbose output for debugging */ verbose++; break; case 'c': break; /* handled by fo_scheduler_connect() */ case 'r': rerun = 1; /** rerun bucket */ break; case 'V': /* print version info */ printf("%s", BuildVersion); PQfinish(pgConn); exit(0); default: Usage(argv[0]); PQfinish(pgConn); exit(-1); } } debug = verbose; /*** validate command line ***/ if (!bucketpool_pk && !ReadFromStdin) { printf("FATAL: You must specify an active bucketpool.\n"); Usage(argv[0]); exit(-1); } if (!head_uploadtree_pk && !ReadFromStdin) { printf("FATAL: You must specify a valid uploadtree_pk or upload_pk.\n"); Usage(argv[0]); exit(-1); } /* get agent pk * Note, if GetAgentKey fails, this process will exit. */ COMMIT_HASH = fo_sysconfig("buckets", "COMMIT_HASH"); VERSION = fo_sysconfig("buckets", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH); agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), uploadtree.upload_fk, agent_rev, agentDesc); /*** Initialize the license_ref table cache ***/ /* Build the license ref cache to hold 2**11 (2048) licenses. This MUST be a power of 2. 
*/ cacheroot.maxnodes = 2<<11; cacheroot.nodes = calloc(cacheroot.maxnodes, sizeof(cachenode_t)); if (!lrcache_init(pgConn, &cacheroot)) { printf("FATAL: Bucket agent could not allocate license_ref table cache.\n"); exit(1); } /* main processing loop */ while(++readnum) { uploadtree.upload_fk = 0; if (ReadFromStdin) { bucketpool_pk = 0; /* Read the bucketpool_pk and upload_pk from stdin. * Format looks like 'bppk=123, upk=987' */ if (!fo_scheduler_next()) break; token = strtok_r(fo_scheduler_current(), Delims, &saveptr); while (token && (!uploadtree.upload_fk || !bucketpool_pk)) { if (strcmp(token, "bppk") == 0) { bucketpool_pk = atoi(strtok_r(NULL, Delims, &saveptr)); } else if (strcmp(token, "upk") == 0) { uploadtree.upload_fk = atoi(strtok_r(NULL, Delims, &saveptr)); } token = strtok_r(NULL, Delims, &saveptr); } /* Check Permissions */ if (GetUploadPerm(pgConn, uploadtree.upload_fk, user_pk) < PERM_WRITE) { LOG_ERROR("You have no update permissions on upload %d", uploadtree.upload_fk); continue; } /* From the upload_pk, get the head of the uploadtree, pfile_pk and ufile_name */ sprintf(sqlbuf, "select uploadtree_pk, pfile_fk, ufile_name, ufile_mode,lft,rgt from uploadtree \ where upload_fk='%d' and parent is null limit 1", uploadtree.upload_fk); topresult = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) return -1; if (PQntuples(topresult) == 0) { printf("ERROR: %s.%s missing upload_pk %d.\nsql: %s", __FILE__, agentDesc, uploadtree.upload_fk, sqlbuf); PQclear(topresult); continue; } head_uploadtree_pk = atol(PQgetvalue(topresult, 0, 0)); uploadtree.uploadtree_pk = head_uploadtree_pk; uploadtree.upload_fk = uploadtree.upload_fk; uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 1)); uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 2)); uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 3)); uploadtree.lft = atoi(PQgetvalue(topresult, 0, 4)); uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 5)); 
PQclear(topresult); } /* end ReadFromStdin */ else { /* Only one input to process if from command line, so terminate if it's been done */ if (readnum > 1) break; /* not reading from stdin * Get the pfile, and ufile_name for head_uploadtree_pk */ sprintf(sqlbuf, "select pfile_fk, ufile_name, ufile_mode,lft,rgt, upload_fk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk); topresult = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) { free(uploadtree.ufile_name); return -1; } if (PQntuples(topresult) == 0) { printf("FATAL: %s.%s missing root uploadtree_pk %d\n", __FILE__, agentDesc, head_uploadtree_pk); PQclear(topresult); continue; } uploadtree.uploadtree_pk = head_uploadtree_pk; uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 0)); uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 1)); uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 2)); uploadtree.lft = atoi(PQgetvalue(topresult, 0, 3)); uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 4)); uploadtree.upload_fk = atoi(PQgetvalue(topresult, 0, 5)); PQclear(topresult); } /* Find the most recent nomos data for this upload. That's what we want to use to process the buckets. */ nomos_agent_pk = LatestNomosAgent(pgConn, uploadtree.upload_fk); if (nomos_agent_pk == 0) { printf("WARNING: Bucket agent called on treeitem (%d), but the latest nomos agent hasn't created any license data for this tree.\n", head_uploadtree_pk); continue; } /* at this point we know: * bucketpool_pk, bucket agent_pk, nomos agent_pk, upload_pk, * pfile_pk, and head_uploadtree_pk (the uploadtree_pk of the head tree to scan) */ /* Has the upload already been processed? If so, we are done. Don't even bother to create a bucket_ars entry. 
*/ switch (UploadProcessed(pgConn, agent_pk, nomos_agent_pk, uploadtree.pfile_fk, head_uploadtree_pk, uploadtree.upload_fk, bucketpool_pk)) { case 1: /* upload has already been processed */ if (1 == rerun) break; printf("LOG: Duplicate request for bucket agent to process upload_pk: %d, uploadtree_pk: %d, bucketpool_pk: %d, bucket agent_pk: %d, nomos agent_pk: %d, pfile_pk: %d ignored.\n", uploadtree.upload_fk, head_uploadtree_pk, bucketpool_pk, agent_pk, nomos_agent_pk, uploadtree.pfile_fk); continue; case -1: /* SQL error, UploadProcessed() wrote error message */ continue; case 0: /* upload has not been processed */ break; } /*** Initialize the Bucket Definition List bucketDefArray ***/ bucketDefArray = initBuckets(pgConn, bucketpool_pk, &cacheroot); if (bucketDefArray == 0) { printf("FATAL: %s.%d Bucket definition for pool %d could not be initialized.\n", __FILE__, __LINE__, bucketpool_pk); exit(-2); } bucketDefArray->nomos_agent_pk = nomos_agent_pk; bucketDefArray->bucket_agent_pk = agent_pk; /* Find the correct uploadtree table name */ uploadtree_tablename = GetUploadtreeTableName(pgConn, uploadtree.upload_fk); if (!(uploadtree_tablename)) { LOG_FATAL("buckets passed invalid upload, upload_pk = %d", uploadtree.upload_fk); return(-110); } /* set uploadtree_tablename in all the bucket definition structs */ for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++) { tmpbucketDefArray->uploadtree_tablename = uploadtree_tablename; } /* loop through rules (bucket defs) to see if there are any package only rules */ hasPrules = 0; for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++) if (tmpbucketDefArray->applies_to == 'p') { hasPrules = 1; break; } /*** END initializing bucketDefArray ***/ /*** Initialize DEB_SOURCE and DEB_BINARY ***/ sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-package'"); result = PQexec(pgConn, sqlbuf); if 
(fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1; if (PQntuples(result) == 0) { printf("FATAL: (%s.%d) Missing application/x-debian-package mimetype.\n",__FILE__,__LINE__); return -1; } DEB_BINARY = atoi(PQgetvalue(result, 0, 0)); PQclear(result); sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-source'"); result = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1; if (PQntuples(result) == 0) { printf("FATAL: (%s.%d) Missing application/x-debian-source mimetype.\n",__FILE__,__LINE__); return -1; } DEB_SOURCE = atoi(PQgetvalue(result, 0, 0)); PQclear(result); /*** END Initialize DEB_SOURCE and DEB_BINARY ***/ /*** Record analysis start in bucket_ars, the bucket audit trail. ***/ if (0 == rerun) { // do not have any bucket scan on this upload snprintf(sqlbuf, sizeof(sqlbuf), "insert into bucket_ars (agent_fk, upload_fk, ars_success, nomosagent_fk, bucketpool_fk) values(%d,%d,'%s',%d,%d)", agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk); if (debug) printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf); result = PQexec(pgConn, sqlbuf); if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1; PQclear(result); /* retrieve the ars_pk of the newly inserted record */ sprintf(sqlbuf, "select ars_pk from bucket_ars where agent_fk='%d' and upload_fk='%d' and ars_success='%s' and nomosagent_fk='%d' \ and bucketpool_fk='%d' and ars_endtime is null \ order by ars_starttime desc limit 1", agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk); result = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1; if (PQntuples(result) == 0) { printf("FATAL: (%s.%d) Missing bucket_ars record.\n%s\n",__FILE__,__LINE__,sqlbuf); return -1; } ars_pk = atol(PQgetvalue(result, 0, 0)); PQclear(result); } /*** END bucket_ars insert ***/ if (debug) printf("%s sql: %s\n",__FILE__, 
sqlbuf); /* process the tree for buckets Do this as a single transaction, therefore this agent must be run as a single thread. This will prevent the scheduler from consuming excess time (this is a fast agent), and allow this process to update bucket_ars. */ rv = walkTree(pgConn, bucketDefArray, agent_pk, head_uploadtree_pk, 0, hasPrules); /* if no errors and top level is a container, process the container */ if ((!rv) && (IsContainer(uploadtree.ufile_mode))) { rv = processFile(pgConn, bucketDefArray, &uploadtree, agent_pk, hasPrules); } /* Record analysis end in bucket_ars, the bucket audit trail. */ if (0 == rerun && ars_pk) { if (rv) snprintf(sqlbuf, sizeof(sqlbuf), "update bucket_ars set ars_endtime=now(), ars_success=false where ars_pk='%d'", ars_pk); else snprintf(sqlbuf, sizeof(sqlbuf), "update bucket_ars set ars_endtime=now(), ars_success=true where ars_pk='%d'", ars_pk); if (debug) printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf); result = PQexec(pgConn, sqlbuf); if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1; PQclear(result); } } /* end of main processing loop */
/** * \brief Get the mimetype for a package * \param argc the number of command line arguments * \param argv the command line arguments * \return 0 on a successful program execution */ int main(int argc, char *argv[]) { int arg; char *Parm = NULL; char *Path = NULL; int c; char *agent_desc = "Determines mimetype for each file"; int pfile_count = 0; int Agent_pk; int ars_pk = 0; int upload_pk = 0; // the upload primary key int user_pk = 0; char *AgentARSName = "mimetype_ars"; int rv; PGresult *result; char sqlbuf[1024]; int CmdlineFlag = 0; ///< run from command line flag, 1 yes, 0 not char *COMMIT_HASH; char *VERSION; char agent_rev[MAXCMD]; /* initialize the scheduler connection */ fo_scheduler_connect(&argc, argv, &pgConn); /* Process command-line */ while((c = getopt(argc,argv,"iCc:hvV")) != -1) { switch(c) { case 'i': PQfinish(pgConn); return(0); case 'c': /* do nothing with this option */ break; case 'C': CmdlineFlag = 1; break; case 'v': agent_verbose++; break; case 'V': printf("%s", BuildVersion); PQfinish(pgConn); return(0); default: Usage(argv[0]); PQfinish(pgConn); exit(-1); } } COMMIT_HASH = fo_sysconfig("mimetype", "COMMIT_HASH"); VERSION = fo_sysconfig("mimetype", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH); /* Get the Agent Key from the DB */ Agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), 0, agent_rev, agent_desc); FMimetype = fopen("/etc/mime.types","rb"); if (!FMimetype) { LOG_WARNING("Unable to open /etc/mime.types\n"); } MagicCookie = magic_open(MAGIC_PRESERVE_ATIME|MAGIC_MIME); if (MagicCookie == NULL) { LOG_FATAL("Failed to initialize magic cookie\n"); PQfinish(pgConn); exit(-1); } if (magic_load(MagicCookie,NULL) != 0) { LOG_FATAL("Failed to load magic file: UnMagic\n"); PQfinish(pgConn); exit(-1); } /* Run from the command-line (for testing) */ for(arg=optind; arg < argc; arg++) { Akey = -1; memset(A,'\0',sizeof(A)); strncpy(A,argv[arg],sizeof(A)); DBCheckMime(A); } /* Run from scheduler! 
*/ if (0 == CmdlineFlag) { user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */ while(fo_scheduler_next()) { /* get piece of information, including upload_pk, others */ Parm = fo_scheduler_current(); if (Parm && Parm[0]) { upload_pk = atoi(Parm); /* Check Permissions */ if (GetUploadPerm(pgConn, upload_pk, user_pk) < PERM_WRITE) { LOG_ERROR("You have no update permissions on upload %d", upload_pk); continue; } /* does ars table exist? * If not, create it. */ rv = fo_tableExists(pgConn, AgentARSName); if (!rv) { rv = fo_CreateARSTable(pgConn, AgentARSName); if (!rv) return(0); } /* check ars table if this is duplicate request*/ memset(sqlbuf, 0, sizeof(sqlbuf)); snprintf(sqlbuf, sizeof(sqlbuf), "select ars_pk from mimetype_ars,agent \ where agent_pk=agent_fk and ars_success=true \ and upload_fk='%d' and agent_fk='%d'", upload_pk, Agent_pk); result = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) exit(-1); if (PQntuples(result) > 0) { PQclear(result); LOG_WARNING("Ignoring requested mimetype analysis of upload %d - Results are already in database.\n",upload_pk); continue; } PQclear(result); /* Record analysis start in mimetype_ars, the mimetype audit trail. */ ars_pk = fo_WriteARS(pgConn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 0); /* get all pfile ids on a upload record */ memset(sqlbuf, 0, sizeof(sqlbuf)); snprintf(sqlbuf, sizeof(sqlbuf), "SELECT DISTINCT(pfile_pk) as Akey, pfile_sha1 || '.' || pfile_md5 || '.' 
|| pfile_size AS A FROM uploadtree, pfile WHERE uploadtree.pfile_fk = pfile.pfile_pk AND pfile_mimetypefk is NULL AND upload_fk = '%d';", upload_pk); result = PQexec(pgConn, sqlbuf); if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) exit(-1); pfile_count = PQntuples(result); int i; for(i=0; i < pfile_count; i++) { Akey = atoi(PQgetvalue(result, i, 0)); strncpy(A, PQgetvalue(result, i, 1), sizeof(A)); if (Akey <= 0 || A[0]=='\0') { printf("ERROR: Data is in an unknown format.\n"); PQfinish(pgConn); exit(-1); } /* Process the repository file */ /* Find the path */ Path = fo_RepMkPath("files",A); if (Path && fo_RepExist("files",A)) { /* Get the mimetype! */ DBCheckMime(Path); } else { printf("ERROR pfile %d Unable to process.\n",Akey); printf("LOG pfile %d File '%s' not found.\n",Akey,A); PQfinish(pgConn); exit(-1); } /* Clean up Path memory */ if(Path) { free(Path); Path = NULL; } fo_scheduler_heart(1); } PQclear(result); /* Record analysis success in mimetype_ars. */ if (ars_pk) fo_WriteARS(pgConn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 1); } } } /* if run from scheduler */ /* Clean up */ if (FMimetype) fclose(FMimetype); magic_close(MagicCookie); if (DBMime) PQclear(DBMime); if (pgConn) PQfinish(pgConn); /* after cleaning up agent, disconnect from the scheduler, this doesn't return */ fo_scheduler_disconnect(0); return(0); } /* main() */
/** * \brief main function for the pkgagent * * There are 2 ways to use the pkgagent agent: * 1. Command Line Analysis :: test a rpm file from the command line * 2. Agent Based Analysis :: run from the scheduler * * +-----------------------+ * | Command Line Analysis | * +-----------------------+ * * To analyze a rpm file from the command line: * file :: if files are rpm package listed, display their meta data * -v :: verbose (-vv = more verbose) * * example: * $ ./pkgagent rpmfile * * +----------------------+ * | Agent Based Analysis | * +----------------------+ * * To run the pkgagent as an agent simply run with no command line args * no file :: process data from the scheduler * -i :: initialize the database, then exit * * example: * $ upload_pk | ./pkgagent * * \param argc the number of command line arguments * \param argv the command line arguments * \return 0 on a successful program execution */ int main (int argc, char *argv[]) { int c; char *agent_desc = "Pulls metadata out of RPM or DEBIAN packages"; //struct rpmpkginfo *glb_rpmpi; //struct debpkginfo *glb_debpi; int Agent_pk; int ars_pk = 0; int upload_pk = 0; // the upload primary key int user_pk = 0; // the upload primary key char *AgentARSName = "pkgagent_ars"; int rv; PGresult *ars_result; char sqlbuf[1024]; char *COMMIT_HASH; char *VERSION; char agent_rev[MAXCMD]; int CmdlineFlag = 0; /* run from command line flag, 1 yes, 0 not */ fo_scheduler_connect(&argc, argv, &db_conn); //glb_rpmpi = (struct rpmpkginfo *)malloc(sizeof(struct rpmpkginfo)); //glb_debpi = (struct debpkginfo *)malloc(sizeof(struct debpkginfo)); COMMIT_HASH = fo_sysconfig("pkgagent", "COMMIT_HASH"); VERSION = fo_sysconfig("pkgagent", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH); Agent_pk = fo_GetAgentKey(db_conn, basename(argv[0]), 0, agent_rev, agent_desc); /* Process command-line */ while((c = getopt(argc,argv,"ic:CvVh")) != -1) { switch(c) { case 'i': PQfinish(db_conn); /* DB was opened above, now close it and exit 
*/ exit(0); case 'v': Verbose++; break; case 'c': break; /* handled by fo_scheduler_connect() */ case 'C': CmdlineFlag = 1; break; case 'V': printf("%s", BuildVersion); PQfinish(db_conn); return(0); default: Usage(argv[0]); PQfinish(db_conn); exit(-1); } } /* If no args, run from scheduler! */ if (CmdlineFlag == 0) { user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */ while(fo_scheduler_next()) { upload_pk = atoi(fo_scheduler_current()); /* Check Permissions */ if (GetUploadPerm(db_conn, upload_pk, user_pk) < PERM_WRITE) { LOG_ERROR("You have no update permissions on upload %d", upload_pk); continue; } if (Verbose) { printf("PKG: pkgagent read %d\n", upload_pk); } if (upload_pk ==0) continue; /* check if pkgagent ars table exist? * if exist, check duplicate request * if not exist, don't check duplicate request */ rv = fo_tableExists(db_conn, AgentARSName); if (rv) { /* check ars table to see if this is duplicate request*/ snprintf(sqlbuf, sizeof(sqlbuf), "select ars_pk from pkgagent_ars,agent \ where agent_pk=agent_fk and ars_success=true \ and upload_fk='%d' and agent_fk='%d'", upload_pk, Agent_pk); ars_result = PQexec(db_conn, sqlbuf); if (fo_checkPQresult(db_conn, ars_result, sqlbuf, __FILE__, __LINE__)) exit(-1); if (PQntuples(ars_result) > 0) { PQclear(ars_result); LOG_WARNING("Ignoring requested pkgagent analysis of upload %d - Results are already in database.\n",upload_pk); continue; } PQclear(ars_result); } /* Record analysis start in pkgagent_ars, the pkgagent audit trail. */ ars_pk = fo_WriteARS(db_conn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 0); /* process the upload_pk pkgagent */ if(ProcessUpload(upload_pk) != 0) return -1; /* Record analysis success in pkgagent_ars. 
*/ if (ars_pk) fo_WriteARS(db_conn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 1); } } else { if (Verbose) { printf("DEBUG: running in cli mode, processing file(s)\n"); } for (; optind < argc; optind++) { struct rpmpkginfo *rpmpi; rpmpi = (struct rpmpkginfo *)malloc(sizeof(struct rpmpkginfo)); rpmReadConfigFiles(NULL, NULL); //if(ProcessUpload(atoi(argv[optind])) == 0) if(GetMetadata(argv[optind],rpmpi) != -1) printf("OK\n"); else printf("Fail\n"); #ifdef _RPM_4_4_COMPAT rpmFreeCrypto(); int i; for(i=0; i< rpmpi->req_size; i++) free(rpmpi->requires[i]); #endif /* After RPM4.4 version*/ free(rpmpi->requires); free(rpmpi); rpmFreeMacros(NULL); } } PQfinish(db_conn); fo_scheduler_disconnect(0); return(0); } /* main() */
/**
 * \brief ununpack agent entry point.
 *
 * Parses command-line options, optionally connects to the FOSSology
 * scheduler/database (repository mode, -Q/-U), unpacks each requested
 * file or upload via TraverseStart()/Traverse(), and records progress
 * in the ununpack_ars audit table.  Exits through SafeExit() with a
 * distinct code (100-114) for each failure site.
 */
int main(int argc, char *argv[])
{
  int Pid;
  int c;
  int rvExist1=0, rvExist2=0;   /* fo_RepExist2() errno results for FILES/GOLD */
  PGresult *result;
  char *NewDir=".";             /* unpack destination directory */
  char *AgentName = "ununpack";
  char *AgentARSName = "ununpack_ars";
  char *agent_desc = "Unpacks archives (iso, tar, etc)";
  int Recurse=0;                /* recursion depth; -1 means unlimited */
  int ars_pk = 0;
  int user_pk = 0;
  long Pfile_size = 0;
  char *ListOutName=NULL;       /* XML listing output file name (-L) */
  char *Fname = NULL;
  char *FnameCheck = NULL;
  char *COMMIT_HASH;
  char *VERSION;
  char agent_rev[PATH_MAX];
  struct stat Stat;

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);

  while((c = getopt(argc,argv,"ACc:d:FfHhL:m:PQiqRr:T:t:U:VvXx")) != -1)
  {
    switch(c)
    {
      case 'A': SetContainerArtifact=0; break;
      case 'C': ForceContinue=1; break;
      case 'c': break; /* handled by fo_scheduler_connect() */
      case 'd':
        /* if there is a %U in the path, substitute a unique ID */
        NewDir=PathCheck(optarg);
        break;
      case 'F': UseRepository=1; break;
      case 'f': ForceDuplicate=1; break;
      case 'L': ListOutName=optarg; break;
      case 'm':
        MaxThread = atoi(optarg);
        if (MaxThread < 1) MaxThread=1;
        break;
      case 'P': PruneFiles=1; break;
      case 'R': Recurse=-1; break;
      case 'r': Recurse=atoi(optarg); break;
      case 'i':
        /* -i: initialization-only run; warn if dpkg-source is missing, then exit */
        if (!IsExe("dpkg-source",Quiet))
          LOG_WARNING("dpkg-source is not available on this system. This means that debian source packages will NOT be unpacked.");
        SafeExit(0);
        break; /* never reached */
      case 'Q':
        /* -Q: queued by the scheduler; pull the upload_pk from it */
        UseRepository=1;
        user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */
        /* Get the upload_pk from the scheduler */
        if((Upload_Pk = fo_scheduler_next()) == NULL) SafeExit(0);
        break;
      case 'q': Quiet=1; break;
      case 'T':
        /* override gold repository root */
        memset(REP_GOLD,0,sizeof(REP_GOLD));
        strncpy(REP_GOLD,optarg,sizeof(REP_GOLD)-1);
        break;
      case 't':
        /* override files repository root */
        memset(REP_FILES,0,sizeof(REP_FILES));
        strncpy(REP_FILES,optarg,sizeof(REP_FILES)-1);
        break;
      case 'U':
        /* -U: unpack a specific upload_pk, full recursion */
        UseRepository = 1;
        Recurse = -1;
        Upload_Pk = optarg;
        break;
      case 'V': printf("%s", BuildVersion);SafeExit(0);
      case 'v': Verbose++; break;
      case 'X': UnlinkSource=1; break;
      case 'x': UnlinkAll=1; break;
      default:
        Usage(argv[0], BuildVersion);
        SafeExit(25);
    }
  }

  /* Open DB and Initialize CMD table */
  if (UseRepository)
  {
    /* Check Permissions */
    if (GetUploadPerm(pgConn, atoi(Upload_Pk), user_pk) < PERM_WRITE)
    {
      LOG_ERROR("You have no update permissions on upload %s", Upload_Pk);
      SafeExit(100);
    }

    COMMIT_HASH = fo_sysconfig(AgentName, "COMMIT_HASH");
    VERSION = fo_sysconfig(AgentName, "VERSION");
    sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
    /* Get the unpack agent key */
    agent_pk = fo_GetAgentKey(pgConn, AgentName, atoi(Upload_Pk), agent_rev,agent_desc);

    InitCmd();

    /* Make sure ars table exists */
    if (!fo_CreateARSTable(pgConn, AgentARSName)) SafeExit(0);

    /* Has this user previously unpacked this upload_pk successfully?
     * In this case we are done.  No new ars record is needed since no
     * processing is initiated.  The unpack version is ignored. */
    snprintf(SQL,MAXSQL,
        "SELECT ars_pk from %s where upload_fk='%s' and ars_success=TRUE",
        AgentARSName, Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(101);
    if (PQntuples(result) > 0) /* if there is a value */
    {
      PQclear(result);
      LOG_WARNING("Upload_pk %s, has already been unpacked. No further action required", Upload_Pk)
      SafeExit(0);
    }
    PQclear(result);

    /* write the unpack_ars start record */
    ars_pk = fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 0);

    /* Get Pfile path and Pfile_Pk, from Upload_Pk */
    snprintf(SQL,MAXSQL, "SELECT pfile.pfile_sha1 || '.' || pfile.pfile_md5 || '.' || pfile.pfile_size AS pfile, pfile_fk, pfile_size FROM upload INNER JOIN pfile ON upload.pfile_fk = pfile.pfile_pk WHERE upload.upload_pk = '%s'", Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(102);
    if (PQntuples(result) > 0) /* if there is a value */
    {
      Pfile = strdup(PQgetvalue(result,0,0));
      Pfile_Pk = strdup(PQgetvalue(result,0,1));
      Pfile_size = atol(PQgetvalue(result, 0, 2));
      if (Pfile_size == 0)
      {
        PQclear(result);
        LOG_WARNING("Uploaded file (Upload_pk %s), is zero length. There is nothing to unpack.", Upload_Pk)
        SafeExit(0);
      }
      PQclear(result);
    }

    /* Determine if uploadtree records should go into a separate table.
     * If the input file size is > 500MB, then create a separate
     * uploadtree_{upload_pk} table that inherits from the master uploadtree
     * table.  Save uploadtree_tablename, it will get written to
     * upload.uploadtree_tablename later. */
    if (Pfile_size > 500000000)
    {
      sprintf(uploadtree_tablename, "uploadtree_%s", Upload_Pk);
      if (!fo_tableExists(pgConn, uploadtree_tablename))
      {
        snprintf(SQL,MAXSQL,"CREATE TABLE %s (LIKE uploadtree INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES); ALTER TABLE %s ADD CONSTRAINT %s CHECK (upload_fk=%s); ALTER TABLE %s INHERIT uploadtree", uploadtree_tablename, uploadtree_tablename, uploadtree_tablename, Upload_Pk, uploadtree_tablename);
        PQsetNoticeProcessor(pgConn, SQLNoticeProcessor, SQL); // ignore notice about implicit primary key index creation
        result = PQexec(pgConn, SQL);
        // Ignore postgres notice about creating an implicit index
        if (PQresultStatus(result) != PGRES_NONFATAL_ERROR)
          if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(103);
        PQclear(result);
      }
    }
    else
      strcpy(uploadtree_tablename, "uploadtree_a");
  }

  CheckCommands(Quiet);
  if (NewDir) MkDir(NewDir);
  /* NOTE(review): reassigning stderr is glibc-specific behavior — confirm
   * portability requirements before relying on it elsewhere. */
  if (Verbose) { fclose(stderr) ; stderr=stdout; } /* don't interlace! */
  if (ListOutName != NULL)
  {
    /* "-L -" means write the XML listing to stdout */
    if ((ListOutName[0]=='-') && (ListOutName[1]=='\0'))
      ListOutFile=stdout;
    else ListOutFile = fopen(ListOutName,"w");
    if (!ListOutFile)
    {
      LOG_ERROR("pfile %s Unable to write to %s\n",Pfile_Pk,ListOutName)
      SafeExit(104);
    }
    else
    {
      /* Start the file: emit the opening <xml> element with version info */
      fputs("<xml tool=\"ununpack\" ",ListOutFile);
      fputs("version=\"",ListOutFile);
      fputs(Version,ListOutFile);
      fputs("\" ",ListOutFile);
      fputs("compiled_date=\"",ListOutFile);
      fputs(__DATE__,ListOutFile);
      fputs(" ",ListOutFile);
      fputs(__TIME__,ListOutFile);
      fputs("\"",ListOutFile);
      fputs(">\n",ListOutFile);
    }
    /* Problem: When parallel processing, the XML may be generated out of order.
       Solution? When using XML, only use 1 thread. */
    MaxThread=1;
  }

  /* Set ReunpackSwitch if the uploadtree records are missing from the database. */
  if (!ReunpackSwitch && UseRepository)
  {
    snprintf(SQL,MAXSQL,"SELECT uploadtree_pk FROM uploadtree WHERE upload_fk=%s limit 1;",Upload_Pk);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(105);
    if (PQntuples(result) == 0) ReunpackSwitch=1;
    PQclear(result);
  }

  /*** process files from command line ***/
  for( ; optind<argc; optind++)
  {
    CksumFile *CF=NULL;
    Cksum *Sum;
    int i;
    if (Fname) { free(Fname); Fname=NULL; }
    if (ListOutName != NULL)
    {
      fprintf(ListOutFile,"<source source=\"%s\" ",argv[optind]);
      if (UseRepository && !fo_RepExist(REP_FILES,argv[optind]))
      {
        /* make sure the source exists in the src repository */
        if (fo_RepImport(argv[optind],REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",argv[optind],argv[optind])
          SafeExit(106);
        }
      }
    }
    if (UseRepository)
    {
      /* Resolve the argument to a repository path: prefer FILES, fall back
       * to GOLD (importing into FILES as a side effect). */
      if (fo_RepExist(REP_FILES,argv[optind]))
      {
        Fname=fo_RepMkPath(REP_FILES,argv[optind]);
      }
      else if (fo_RepExist(REP_GOLD,argv[optind]))
      {
        Fname=fo_RepMkPath(REP_GOLD,argv[optind]);
        if (fo_RepImport(Fname,REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,argv[optind])
          SafeExit(107);
        }
      }
      if (Fname)
      {
        FnameCheck = Fname;
        CF = SumOpenFile(Fname);
      }
      else
      {
        LOG_ERROR("NO file unpacked. File %s does not exist either in GOLD or FILES", Pfile);
        SafeExit(108);
      }
      /* else: Fname is NULL and CF is NULL */
    }
    else
    {
      FnameCheck = argv[optind];
      CF = SumOpenFile(argv[optind]);
    }

    /* Check file to unpack.  Does it exist?  Is it zero length? */
    if (stat(FnameCheck,&Stat))
    {
      LOG_ERROR("File to unpack is unavailable: %s, error: %s", Fname, strerror(errno));
      SafeExit(109);
    }
    else if (Stat.st_size < 1)
    {
      LOG_WARNING("File to unpack is empty: %s", Fname);
      SafeExit(110);
    }

    if (ListOutFile)
    {
      if (CF)
      {
        /* mmap-based checksum: SHA1.MD5.length triple for the fuid attribute */
        Sum = SumComputeBuff(CF);
        SumCloseFile(CF);
        if (Sum)
        {
          fputs("fuid=\"",ListOutFile);
          for(i=0; i<20; i++) { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
          fputs(".",ListOutFile);
          for(i=0; i<16; i++) { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
          fputs(".",ListOutFile);
          /* NOTE(review): "%Lu" is not a standard conversion specifier;
           * glibc accepts it like "%llu" but consider "%llu" for portability. */
          fprintf(ListOutFile,"%Lu",(long long unsigned int)Sum->DataLen);
          fputs("\" ",ListOutFile);
          free(Sum);
        } /* if Sum */
      } /* if CF */
      else /* file too large to mmap (probably) */
      {
        FILE *Fin;
        Fin = fopen(argv[optind],"rb");
        if (Fin)
        {
          Sum = SumComputeFile(Fin);
          if (Sum)
          {
            fputs("fuid=\"",ListOutFile);
            for(i=0; i<20; i++) { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
            fputs(".",ListOutFile);
            for(i=0; i<16; i++) { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
            fputs(".",ListOutFile);
            fprintf(ListOutFile,"%Lu",(long long unsigned int)Sum->DataLen);
            fputs("\" ",ListOutFile);
            free(Sum);
          }
          fclose(Fin);
        }
      } /* else no CF */
      fprintf(ListOutFile,">\n"); /* end source XML */
    }
    if (Fname) TraverseStart(Fname,"called by main via args",NewDir,Recurse);
    else TraverseStart(argv[optind],"called by main",NewDir,Recurse);
    if (ListOutName != NULL) fprintf(ListOutFile,"</source>\n");
  } /* end for */

  /* free memory */
  if (Fname) { free(Fname); Fname=NULL; }

  /* process pfile from scheduler */
  if (Pfile)
  {
    /* fo_RepExist2() returns 0 on existence, >0 errno on error */
    if (0 == (rvExist1 = fo_RepExist2(REP_FILES,Pfile)))
    {
      Fname=fo_RepMkPath(REP_FILES,Pfile);
    }
    else if (0 == (rvExist2 = fo_RepExist2(REP_GOLD,Pfile)))
    {
      Fname=fo_RepMkPath(REP_GOLD,Pfile);
      if (fo_RepImport(Fname,REP_FILES,Pfile,1) != 0)
      {
        LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,Pfile)
        SafeExit(111);
      }
    }
    if (Fname)
    {
      TraverseStart(Fname,"called by main via env",NewDir,Recurse);
      free(Fname);
      Fname=NULL;
    }
    else
    {
      LOG_ERROR("NO file unpacked!");
      if (rvExist1 > 0)
      {
        Fname=fo_RepMkPath(REP_FILES, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist1), Fname);
      }
      if (rvExist2 > 0)
      {
        Fname=fo_RepMkPath(REP_GOLD, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist2), Fname);
      }
      SafeExit(112);
    }
  }

  /* recurse on all the children: reap worker processes and traverse any
   * subdirectories they produced */
  if (Thread > 0) do
  {
    Pid = ParentWait();
    Thread--;
    if (Pid >= 0)
    {
      if (!Queue[Pid].ChildEnd)
      {
        /* copy over data */
        if (Recurse > 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse-1,&Queue[Pid].PI);
        else if (Recurse < 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse,&Queue[Pid].PI);
      }
    }
  } while(Pid >= 0);

  if (MagicCookie) magic_close(MagicCookie);
  if (ListOutFile)
  {
    fprintf(ListOutFile,"<summary files_regular=\"%d\" files_compressed=\"%d\" artifacts=\"%d\" directories=\"%d\" containers=\"%d\" />\n", TotalFiles,TotalCompressedFiles,TotalArtifacts, TotalDirectories,TotalContainers);
    fputs("</xml>\n",ListOutFile);
  }

  if (pgConn)
  {
    /* If it completes, mark it! */
    if (Upload_Pk)
    {
      /* set bit 5 of upload_mode (= unpack finished) and record which
       * uploadtree table this upload landed in */
      snprintf(SQL,MAXSQL,"UPDATE upload SET upload_mode = (upload_mode | (1<<5)), uploadtree_tablename='%s' WHERE upload_pk = '%s';",uploadtree_tablename, Upload_Pk);
      result = PQexec(pgConn, SQL); /* UPDATE upload */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(113);
      PQclear(result);

      snprintf(SQL,MAXSQL,"UPDATE %s SET realparent = getItemParent(uploadtree_pk) WHERE upload_fk = '%s'",uploadtree_tablename, Upload_Pk);
      result = PQexec(pgConn, SQL); /* UPDATE uploadtree */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(114);
      PQclear(result);
    }

    /* Record analysis success in the audit trail */
    if (ars_pk) fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 1);
  }
  if (ListOutFile && (ListOutFile != stdout))
  {
    fclose(ListOutFile);
  }

  /* NOTE(review): cleanup only runs when MaxThread > 1 — looks intentional
   * for multi-threaded temp dirs, but confirm single-thread runs are cleaned
   * elsewhere (e.g. inside SafeExit). */
  if (UnlinkAll && MaxThread > 1)
  {
    /* Delete temporary files */
    if (strcmp(NewDir, ".")) RemoveDir(NewDir);
  }

  SafeExit(0);
  return(0);  // never executed but makes the compiler happy
}
/** * @brief main function for the copyright agent * * The copyright agent is used to automatically locate copyright statements * found in code. * * There are 3 ways to use the copyright agent: * 1. Command Line Analysis :: test a file from the command line * 2. Agent Based Analysis :: waits for commands from stdin * 3. Accuracy Test :: tests the accuracy of the copyright agent * * +-----------------------+ * | Command Line Analysis | * +-----------------------+ * * To analyze a file from the command line: * -C <filename> :: run copyright agent from command line * -d :: turn on debugging information * -T <Copyright Statements | URLs| Emails> :: Copyright Statements | URLs |Emails * * example: * $ ./copyright -C myfiletoscan * * +----------------------+ * | Agent Based Analysis | * +----------------------+ * * To run the copyright agent as an agent simply run with no command line args * -i :: initialize a connection to the database * -d :: turn on debugging information * * example: * $ upload_pk | ./copyright * * +---------------+ * | Accuracy Test | * +---------------+ * * To test the accuracy of the copyright agent run with a -t. Make sure to run the * accuracy tests in the source directory with the testdata directory: * -t :: run the accuracy analysis * * example: * $ ./copyright -t * * Running the tests will create 3 files: * 1. Matches: contains all of the matches found by the copyright agent, information * includes what file the match was found in, the dictionary element * that it matched, the name that it matched and the text that was found * 2. False_Positives: contains all of the false positives found by the agent, * information in the file includes the file the false positive was * in, the dictionary match, the name match, and the text * 3. 
Flase_Negatives: contains all of the false negatives found by the agent, * information in the file includes the file the false negative was * in, and the text of the false negative * * NOTE: -d will produces the exact same style of Matches file that the accuracy * testing does. Currently this is the only thing that -d will produce * * @param argc the number of command line arguments * @param argv the command line arguments * @return 0 on a successful program execution */ int main(int argc, char** argv) { /* primitives */ char sql[512]; // buffer for database access int c, i = -1; // temporary int containers int num_files = 0; // the number of rows in a job int ars_pk = 0; // the args primary key int user_pk = 0; long upload_pk = 0; // the upload primary key long agent_pk = 0; // the agents primary key char *SVN_REV = NULL; char *VERSION = NULL; char agent_rev[myBUFSIZ]; char copy_buf[FILENAME_MAX]; char name_buf[FILENAME_MAX]; int report_type = 7; // defaul as all. binary xxx 1st number as email, 2nd number as url, 3rd number as statement int cli_run = 0; // when run from command line, that mean -C option is set; 1: yes, 0: no /* Database structs */ PGconn* pgConn = NULL; // the connection to Database PGresult* pgResult = NULL; // result of a database access /* copyright structs */ copyright copy; // the work horse of the copyright agent pair curr; // pair to push into the file list /* verbose data */ FILE* mout = NULL; /* set the output streams */ cout = stdout; cerr = stdout; cin = stdin; /* connect to the scheduler */ fo_scheduler_connect(&argc, argv, &pgConn); /* initialize complex data strcutres */ memset(copy_buf, '\0', sizeof(copy_buf)); memset(name_buf, '\0', sizeof(copy_buf)); snprintf(copy_buf, sizeof(copy_buf), "%s/mods-enabled/copyright/agent/copyright.dic", sysconfigdir); snprintf(name_buf, sizeof(name_buf), "%s/mods-enabled/copyright/agent/names.dic", sysconfigdir); if(!copyright_init(©, copy_buf, name_buf)) { fprintf(cerr, "FATAL %s.%d: copyright 
initialization failed\n", __FILE__, __LINE__); fprintf(cerr, "FATAL %s\n", strerror(errno)); fflush(cerr); return 1; } /* parse the command line options */ while((c = getopt(argc, argv, "T:dc:C:tiVvh")) != -1) { switch(c) { case 'v': /* debugging */ mout = fopen("Matches", "w"); if(!mout) { fprintf(cerr, "ERROR could not open Matches for logging\n"); fflush(cerr); } else { verbose = 1; } break; case 'C': /* run from command line */ cli_run = 1; pair_init(&curr, string_function_registry(), int_function_registry()); pair_set_first(curr, optarg); pair_set_second(curr, &i); num_files++; break; case 'T': /* report type, Copyright Statements | URLs| Emails */ report_type = atoi(optarg); printf("report_type is:%d\n", report_type); break; case 't': /* run accuracy testing */ run_test_files(copy); copyright_destroy(copy); return 0; case 'i': /* initialize database connections */ copyright_destroy(copy); PQfinish(pgConn); return 0; case 'V': printf("%s", BuildVersion); copyright_destroy(copy); PQfinish(pgConn); return 0; default: /* error, print usage */ copyright_usage(argv[0]); return 3; } } /** run from command line */ if (1 == cli_run) { perform_analysis(pgConn, copy, curr, agent_pk, mout, report_type); pair_destroy(curr); } /* if there are no files in the file list then the agent is begin run from */ /* the scheduler, open the database and grab the files to be analyzed */ if(num_files == 0) { /* create the sql copy structure */ sqlcpy = fo_sqlCopyCreate(pgConn, "copyright", 32768, 7, "agent_fk", "pfile_fk", "copy_startbyte", "copy_endbyte", "content", "hash", "type"); /* book keeping */ pair_init(&curr, string_function_registry(), int_function_registry()); db_connected = 1; SVN_REV = fo_sysconfig("copyright", "SVN_REV"); VERSION = fo_sysconfig("copyright", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, SVN_REV); agent_pk = fo_GetAgentKey(pgConn, AGENT_NAME, 0, agent_rev, AGENT_DESC); /* make sure that we are connected to the database */ 
if(!check_copyright_table(pgConn)) { return 5; } user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */ /* enter the main agent loop */ while(fo_scheduler_next()) { upload_pk = atol(fo_scheduler_current()); /* Check Permissions */ if (GetUploadPerm(pgConn, upload_pk, user_pk) < PERM_WRITE) { LOG_ERROR("You have no update permissions on upload %ld", upload_pk); continue; } ars_pk = fo_WriteARS(pgConn, 0, upload_pk, agent_pk, AGENT_ARS, NULL, 0); sprintf(sql, fetch_pfile, upload_pk, agent_pk, agent_pk); pgResult = PQexec(pgConn, sql); num_files = PQntuples(pgResult); for(i = 0; i < num_files; i++) { c = atoi(PQgetvalue(pgResult, i, PQfnumber(pgResult, "pfile_pk"))); pair_set_first(curr, PQgetvalue(pgResult, i, PQfnumber(pgResult, "pfilename"))); pair_set_second(curr, &c); perform_analysis(pgConn, copy, curr, agent_pk, mout, REPORTALL); } fo_WriteARS(pgConn, ars_pk, upload_pk, agent_pk, AGENT_ARS, NULL, 1); PQclear(pgResult); } pair_destroy(curr); } if(db_connected) { fo_sqlCopyDestroy(sqlcpy, 1); PQfinish(pgConn); } if(verbose) { fclose(mout); } copyright_destroy(copy); fo_scheduler_disconnect(0); return 0; }
int main (int argc, char *argv[]) { int arg; char *Parm = NULL; char *TempFileDir=NULL; int c; int InitFlag=0; int CmdlineFlag = 0; /** run from command line flag, 1 yes, 0 not */ int user_pk; char *agent_desc = "Network downloader. Uses wget(1)."; memset(GlobalTempFile,'\0',MAXCMD); memset(GlobalURL,'\0',MAXCMD); memset(GlobalParam,'\0',MAXCMD); memset(GlobalType,'\0',MAXCMD); GlobalUploadKey = -1; int upload_pk = 0; // the upload primary key //int Agent_pk; char *SVN_REV; char *VERSION; char agent_rev[MAXCMD]; /* open the connection to the scheduler and configuration */ fo_scheduler_connect(&argc, argv, &pgConn); /* Process command-line */ while((c = getopt(argc,argv,"d:Gg:ik:A:R:l:Cc:Vvh")) != -1) { switch(c) { case 'd': TempFileDir = PathCheck(optarg); break; case 'g': { struct group *SG; SG = getgrnam(optarg); if (SG) ForceGroup = SG->gr_gid; } break; case 'G': GlobalImportGold=0; break; case 'i': InitFlag=1; break; case 'k': GlobalUploadKey = atol(optarg); if (!GlobalTempFile[0]) strcpy(GlobalTempFile,"wget.default_download"); break; case 'A': sprintf(GlobalParam, "%s -A %s ",GlobalParam, optarg); break; case 'R': sprintf(GlobalParam, "%s -R %s ",GlobalParam, optarg); break; case 'l': sprintf(GlobalParam, "%s -l %s ",GlobalParam, optarg); break; case 'c': break; /* handled by fo_scheduler_connect() */ case 'C': CmdlineFlag = 1; break; case 'v': break; case 'V': printf("%s", BuildVersion); SafeExit(0); default: Usage(argv[0]); SafeExit(-1); } } if (argc - optind > 1) { Usage(argv[0]); SafeExit(-1); } /* When initializing the DB, don't do anything else */ if (InitFlag) { if (pgConn) PQfinish(pgConn); SafeExit(0); } SVN_REV = fo_sysconfig("wget_agent", "SVN_REV"); VERSION = fo_sysconfig("wget_agent", "VERSION"); sprintf(agent_rev, "%s.%s", VERSION, SVN_REV); /* Get the Agent Key from the DB */ fo_GetAgentKey(pgConn, basename(argv[0]), GlobalUploadKey, agent_rev, agent_desc); /** get proxy */ GetProxy(); /* Run from the command-line (for testing) */ 
for(arg=optind; arg < argc; arg++) { memset(GlobalURL,'\0',sizeof(GlobalURL)); strncpy(GlobalURL,argv[arg],sizeof(GlobalURL)); /* If the file contains "://" then assume it is a URL. Else, assume it is a file. */ LOG_VERBOSE0("Command-line: %s",GlobalURL); if (strstr(GlobalURL,"://")) { fo_scheduler_heart(1); LOG_VERBOSE0("It's a URL"); if (GetURL(GlobalTempFile,GlobalURL,TempFileDir) != 0) { LOG_FATAL("Download of %s failed.",GlobalURL); SafeExit(21); } if (GlobalUploadKey != -1) { DBLoadGold(); } unlink(GlobalTempFile); } else /* must be a file */ { LOG_VERBOSE0("It's a file -- GlobalUploadKey = %ld",GlobalUploadKey); if (GlobalUploadKey != -1) { memcpy(GlobalTempFile,GlobalURL,MAXCMD); DBLoadGold(); } } } /* Run from scheduler! */ if (0 == CmdlineFlag) { user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */ while(fo_scheduler_next()) { Parm = fo_scheduler_current(); /* get piece of information, including upload_pk, downloadfile url, and parameters */ if (Parm && Parm[0]) { fo_scheduler_heart(1); /* set globals: uploadpk, downloadfile url, parameters */ SetEnv(Parm,TempFileDir); upload_pk = GlobalUploadKey; /* Check Permissions */ if (GetUploadPerm(pgConn, upload_pk, user_pk) < PERM_WRITE) { LOG_ERROR("You have no update permissions on upload %d", upload_pk); continue; } char TempDir[MAXCMD]; memset(TempDir,'\0',MAXCMD); snprintf(TempDir, MAXCMD-1, "%s/wget", TempFileDir); // /var/local/lib/fossology/agents/wget struct stat Status; if (GlobalType[0]) { if (GetVersionControl() == 0) { DBLoadGold(); unlink(GlobalTempFile); } else { LOG_FATAL("upload %ld File retrieval failed: uploadpk=%ld tempfile=%s URL=%s Type=%s", GlobalUploadKey,GlobalUploadKey,GlobalTempFile,GlobalURL, GlobalType); SafeExit(23); } } else if (strstr(GlobalURL, "*") || stat(GlobalURL, &Status) == 0) { if (!Archivefs(GlobalURL, GlobalTempFile, TempFileDir, Status)) { LOG_FATAL("Failed to archieve. 
GlobalURL, GlobalTempFile, TempFileDir are: %s, %s, %s, " "Mode is: %lo (octal)\n", GlobalURL, GlobalTempFile, TempFileDir, (unsigned long) Status.st_mode); SafeExit(50); } DBLoadGold(); unlink(GlobalTempFile); } else { if (GetURL(GlobalTempFile,GlobalURL,TempDir) == 0) { DBLoadGold(); unlink(GlobalTempFile); } else { LOG_FATAL("upload %ld File retrieval failed: uploadpk=%ld tempfile=%s URL=%s", GlobalUploadKey,GlobalUploadKey,GlobalTempFile,GlobalURL); SafeExit(22); } } } } } /* if run from scheduler */ SafeExit(0); exit(0); /* to prevent compiler warning */ } /* main() */