Example #1
/**
 * \brief Add a new license to license_ref table
 *
 * Adds a license to license_ref table.
 *
 * \param  char $licenseName
 *
 * \return rf_pk for success, 0 for failure
 */
FUNCTION long add2license_ref(PGconn *pgConn, char *licenseName) 
{
    PGresult *result;
    char  query[256];
    char  insert[256];
    char  escLicName[256];
    char *specialLicenseText;
    long rf_pk;

    int len;
    int error;
    int numRows;

    // escape the name
    len = strlen(licenseName);
    PQescapeStringConn(pgConn, escLicName, licenseName, len, &error);
    if (error)
      printf("WARNING: %s(%d): Does license name have multibyte encoding?", __FILE__, __LINE__);

    /* verify the license is not already in the table */
    sprintf(query, "SELECT rf_pk FROM license_ref where rf_shortname='%s' and rf_detector_type=2", escLicName);
    result = PQexec(pgConn, query);
    if (fo_checkPQresult(pgConn, result, query, "add2license_ref", __LINE__)) return 0;
    numRows = PQntuples(result);
    if (numRows)
    {
      rf_pk = atol(PQgetvalue(result, 0, 0));
      PQclear(result);
      return rf_pk;
    }
    PQclear(result);

    /* Insert the new license */
    specialLicenseText = "License by Nomos.";

    sprintf( insert,
            "insert into license_ref(rf_shortname, rf_text, rf_detector_type) values('%s', '%s', 2)",
            escLicName, specialLicenseText);
    result = PQexec(pgConn, insert);
    if (fo_checkPQcommand(pgConn, result, insert, __FILE__, __LINE__)) return 0;
    PQclear(result);

    /* retrieve the new rf_pk */
    result = PQexec(pgConn, query);
    if (fo_checkPQresult(pgConn, result, query, "add2license_ref", __LINE__)) return 0;
    numRows = PQntuples(result);
    if (numRows)
      rf_pk = atol(PQgetvalue(result, 0, 0));
    else
    {
      printf("ERROR: %s:%s:%d Just inserted value is missing. On: %s", __FILE__, "add2license_ref()", __LINE__, query);
      return(0);
    }
    PQclear(result);

    return (rf_pk);
}
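
Note: the function above escapes licenseName with PQescapeStringConn and splices it into the query text. A rough alternative sketch (not part of the original code; lookup_license_ref is a hypothetical name) does the same duplicate check with a bound parameter through libpq's PQexecParams, which avoids the manual escaping and the fixed-size query buffer; fo_checkPQresult is called the same way as above.

static long lookup_license_ref(PGconn *pgConn, char *licenseName)
{
  char *sql = "SELECT rf_pk FROM license_ref"
              " WHERE rf_shortname = $1 AND rf_detector_type = 2";
  const char *values[1] = {licenseName};
  PGresult *result;
  long rf_pk = 0;

  /* text parameter: paramTypes, paramLengths and paramFormats may all be NULL */
  result = PQexecParams(pgConn, sql, 1, NULL, values, NULL, NULL, 0);
  if (fo_checkPQresult(pgConn, result, sql, "lookup_license_ref", __LINE__)) return 0;
  if (PQntuples(result) > 0) rf_pk = atol(PQgetvalue(result, 0, 0));
  PQclear(result);
  return rf_pk;
}

The INSERT statement could be parameterized the same way.
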
Example #2
/**
 * \brief given a container uploadtree_pk and bucketdef, determine 
 * if any child is in this bucket.
 *
 * \param PGconn $pgConn postgresql connection
 * \param pbucketdef_t $bucketDef
 * \param puploadtree_t $puploadtree
 *
 * \return 1 if child is in this bucket \n
 *        0 not in bucket \n
 *       -1 error \n
 */
FUNCTION int childInBucket(PGconn *pgConn, pbucketdef_t bucketDef, puploadtree_t puploadtree)
{
  char *fcnName = "childInBucket";
  char  sql[1024];
  int   lft, rgt, upload_pk, rv;
  PGresult *result;

  lft = puploadtree->lft;
  rgt = puploadtree->rgt;
  upload_pk = puploadtree->upload_fk;

  /* Are any children in this bucket? 
     First check bucket_container.  
     If none found, then look in bucket_file.
  */
  snprintf(sql, sizeof(sql), 
           "select uploadtree_pk from %s \
              inner join bucket_container \
                on uploadtree_fk=uploadtree_pk and bucket_fk=%d \
                   and agent_fk=%d and nomosagent_fk=%d \
            where upload_fk=%d and %s.lft BETWEEN %d and %d limit 1",
           bucketDef->uploadtree_tablename,
           bucketDef->bucket_pk, bucketDef->bucket_agent_pk, 
           bucketDef->nomos_agent_pk, upload_pk, 
           bucketDef->uploadtree_tablename,
           lft, rgt);
//  if (debug) printf("===%s:%d:\n%s\n===\n", __FILE__, __LINE__, sql);
  result = PQexec(pgConn, sql);
  if (fo_checkPQresult(pgConn, result, sql, fcnName, __LINE__)) return -1;
  rv = PQntuples(result);
  PQclear(result);
  if (rv) return 1;
  
  /* none found so look in bucket_file for any child in this bucket */
  snprintf(sql, sizeof(sql), 
           "select uploadtree_pk from %s \
              inner join bucket_file \
                on %s.pfile_fk=bucket_file.pfile_fk and bucket_fk=%d \
                   and agent_fk=%d and nomosagent_fk=%d \
            where upload_fk=%d and %s.lft BETWEEN %d and %d limit 1",
           bucketDef->uploadtree_tablename,
           bucketDef->uploadtree_tablename,
           bucketDef->bucket_pk, bucketDef->bucket_agent_pk, 
           bucketDef->nomos_agent_pk, upload_pk, 
           bucketDef->uploadtree_tablename,
           lft, rgt);
//  if (debug) printf("===%s:%d:\n%s\n===\n", __FILE__, __LINE__, sql);
  result = PQexec(pgConn, sql);
  if (fo_checkPQresult(pgConn, result, sql, fcnName, __LINE__)) return -1;
  rv = PQntuples(result);
  PQclear(result);
  if (rv) return 1;

  return 0;
}
Example #3
/**
 * \brief process the jobs from the scheduler
 *
 * -# Read the jobs from the scheduler using fo_scheduler_next().
 * -# Get the permission level of the current user.
 * -# Parse the parameters and process them.
 * \see fo_scheduler_next()
 * \see readAndProcessParameter()
 */
void doSchedulerTasks()
{
  char *Parm = NULL;
  char SQL[MAXSQL];
  PGresult *result;
  int userId = -1;
  int userPerm = -1;

  while(fo_scheduler_next())
  {
    Parm = fo_scheduler_current();
    userId = fo_scheduler_userID();

    /* get perm level of user */
    snprintf(SQL,MAXSQL,"SELECT user_perm FROM users WHERE user_pk='%d';", userId);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__) || !PQntuples(result))
    {
      exitNow(0);
    }
    userPerm = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    int returnCode = readAndProcessParameter(Parm, userId, userPerm);
    if (returnCode != 0)
    {
      /* Log level is too high, but the scheduler expects a FATAL log message before exit */
      LOG_FATAL("Due to permission problems, the delagent was not able to list or delete the requested objects or they did not exist.");
      exitNow(returnCode);
    }
  }
}
Example #4
/**
 * \brief Given a user id, find detached folders and uploads
 * \param userId
 * \param userPerm permission level the user has
 * \return 0: success;
 *         1: fail;
 *        -1: failure
 */
int listFoldersFindDetatched(int userId, int userPerm)
{
  char SQL[MAXSQL];
  PGresult *result;
  int rc;

  snprintf(SQL,MAXSQL,"SELECT folder_pk,parent,name,description,upload_pk FROM folderlist ORDER BY name,parent,folder_pk;");
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }
  rc = listFoldersFindDetatchedFolders(result, userId, userPerm);
  if (rc < 0 )
  {
    PQclear(result);
    return rc;
  }
  rc = listFoldersFindDetatchedUploads(result, userId, userPerm);
  PQclear(result);
  if (rc < 0 )
  {
    return rc;
  }
  return 0;
}
Example #5
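/**
 * \brief Build the license reference cache from the LICENSE_REF_TABLE table
 * (only licenses with rf_detector_type=2 are loaded).
 *
 * \param cacheroot_t $pcroot  license cache root
 *
 * \return 1 on success, 0 if pcroot is NULL or the query fails
 */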
FUNCTION int initLicRefCache(cacheroot_t *pcroot)
{

  PGresult *result;
  char query[myBUFSIZ];
  int row;
  int numLics;

  if (!pcroot)
    return 0;

  sprintf(query, "SELECT rf_pk, rf_shortname FROM " LICENSE_REF_TABLE " where rf_detector_type=2");
  result = PQexec(gl.pgConn, query);
  if (fo_checkPQresult(gl.pgConn, result, query, __FILE__, __LINE__))
    return 0;

  numLics = PQntuples(result);
  /* populate the cache  */
  for (row = 0; row < numLics; row++)
  {
    lrcache_add(pcroot, atol(PQgetvalue(result, row, 0)), PQgetvalue(result, row, 1));
  }

  PQclear(result);

  return (1);
} /* initLicRefCache */
Example #6
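/**
 * \brief Initialize the license reference cache from the license_ref table
 * (only licenses with rf_detector_type=2 are loaded).
 *
 * \param PGconn $pgConn postgresql connection
 * \param cacheroot_t $pcroot  license cache root
 *
 * \return 1 on success, 0 if pcroot is NULL or the query fails
 */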
FUNCTION int lrcache_init(PGconn *pgConn, cacheroot_t *pcroot) 
{
    PGresult *result;
    char query[128];
    int row;
    int numLics;

    if (!pcroot) return 0;

    snprintf(query, sizeof(query),
            "SELECT rf_pk, rf_shortname FROM license_ref where rf_detector_type=2;");
    result = PQexec(pgConn, query);
    if (fo_checkPQresult(pgConn, result, query, "lrcache_init", __LINE__)) return 0;

    numLics = PQntuples(result);
    /* populate the cache  */
    for (row = 0; row < numLics; row++) 
    {
      lrcache_add(pcroot, atol(PQgetvalue(result, row, 0)), PQgetvalue(result, row, 1));
    }

    PQclear(result);

    return (1);
} /* lrcache_init */
Example #7
/**
 * \brief Given an uploadtree_pk of a container, find the
 * uploadtree_pk of its children (i.e. scan down through
 * artifacts to get the children's parent).
 *
 * \param PGconn $pgConn  Database connection object
 * \param int    $uploadtree_pk  
 *
 * \return uploadtree_pk of children's parent. \n
 *         Or 0 if there are no children (empty container or non-container)
 *        
 * NOTE: This function writes error to stdout
 */
FUNCTION int childParent(PGconn *pgConn, int uploadtree_pk)
{
  char *fcnName = "childParent";
  char sql[256];
  PGresult *result;
  int  childParent_pk = 0;   /* uploadtree_pk */

  do
  {
    snprintf(sql, sizeof(sql),
           "select uploadtree_pk,ufile_mode from uploadtree where parent=%d limit 1", 
           uploadtree_pk);
    result = PQexec(pgConn, sql);
    if (fo_checkPQresult(pgConn, result, sql, fcnName, __LINE__)) break;
    if (PQntuples(result) == 0) break;  /* empty container */

    /* not an artifact? */
    if ((atoi(PQgetvalue(result, 0, 1)) & 1<<28) == 0)
    {
      childParent_pk = uploadtree_pk;
      break;
    }
    uploadtree_pk = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);
  } while (childParent_pk == 0);

  PQclear(result);
  return childParent_pk;
}
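
A hypothetical helper sketch, assuming the uploadtree columns used above (uploadtree_pk, parent, ufile_name): once childParent() has skipped over the artifacts, the actual children can be listed with one more query against the returned parent.

/* Hypothetical helper: print the immediate children of a container. */
static void listChildren(PGconn *pgConn, int uploadtree_pk)
{
  char sql[256];
  PGresult *result;
  int i, parent;

  parent = childParent(pgConn, uploadtree_pk);
  if (parent == 0) return;   /* empty container or non-container */

  snprintf(sql, sizeof(sql),
           "select uploadtree_pk, ufile_name from uploadtree where parent=%d", parent);
  result = PQexec(pgConn, sql);
  if (fo_checkPQresult(pgConn, result, sql, "listChildren", __LINE__)) return;

  for (i = 0; i < PQntuples(result); i++)
    printf("%s  %s\n", PQgetvalue(result, i, 0), PQgetvalue(result, i, 1));
  PQclear(result);
}
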
Example #8
/**
 * \brief List every folder.
 * \param userId
 * \param userPerm permission level the user has
 */
int listFolders (int userId, int userPerm)
{
  char SQL[MAXSQL];
  PGresult *result;
  int rc;

  if(userPerm == 0){
    printf("you do not have the permsssion to view the folder list.\n");
    return 1;
  }

  printf("# Folders\n");
  snprintf(SQL,MAXSQL,"SELECT folder_name from folder where folder_pk =1;");
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }

  printf("%4d :: %s\n", 1, PQgetvalue(result,0,0));
  PQclear(result);

  rc = listFoldersRecurse(1,1,-1,0,userId,userPerm);
  if (rc < 0)
  {
    return rc;
  }

  rc = listFoldersFindDetatched(userId, userPerm);
  if (rc < 0)
  {
    return rc;
  }
  return 0;
} /* listFolders() */
Example #9
/**
 * \brief remove link between parent and (child,mode) if there are other parents
 *
 * \param child  id of the child to be unlinked
 * \param parent id of the parent to unlink from
 * \param mode   1<<0 child is folder_fk, 1<<1 child is upload_fk, 1<<2 child is an uploadtree_fk
 * \param userPerm permission level the user has
 *
 * \return 0: successfully deleted link (other link existed);
 *         1: was not able to delete the link (no other link to this upload existed);
 *        -1: failure
 * \todo add permission checks
 */
int unlinkContent (long child, long parent, int mode, int userId, int userPerm)
{
  int cnt, cntUpload;
  char SQL[MAXSQL];
  PGresult *result;

  if(mode == 1){
    snprintf(SQL,MAXSQL,"SELECT COUNT(DISTINCT parent_fk) FROM foldercontents WHERE foldercontents_mode=%d AND child_id=%ld",mode,child);
  }
  else{
    snprintf(SQL,MAXSQL,"SELECT COUNT(parent_fk) FROM foldercontents WHERE foldercontents_mode=%d AND"
                        " child_id in (SELECT upload_pk FROM folderlist WHERE pfile_fk="
                        "(SELECT pfile_fk FROM folderlist WHERE upload_pk=%ld limit 1))",
                        mode,child);
  }
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }
  cnt = atoi(PQgetvalue(result,0,0));
  PQclear(result);
  if(cnt>1 && !Test)
  {
    if(mode == 2){
      snprintf(SQL,MAXSQL,"SELECT COUNT(DISTINCT parent_fk) FROM foldercontents WHERE foldercontents_mode=1 AND child_id=%ld",parent);
      result = PQexec(pgConn, SQL);
      if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
      {
        return -1;
      }
      cntUpload = atoi(PQgetvalue(result,0,0));
      PQclear(result);
      if(cntUpload > 1){     // check for copied/duplicate folder
        return 0;
      }
    }
    snprintf(SQL,MAXSQL,"DELETE FROM foldercontents WHERE foldercontents_mode=%d AND child_id =%ld AND parent_fk=%ld",mode,child,parent);
    PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);
    return 0;
  }
  return 1;
}
Example #10
/**
 * \brief Test case for function DBLoadMime()
 */
void testDBLoadMime()
{
  char SQL[MAXCMD] = {0};
  PGresult *result = NULL;
  char mimetype_name[] = "application/octet-stream";
  /** delete the record whose mimetype_name is application/octet-stream from mimetype */
  memset(SQL, '\0', MAXCMD);
  snprintf(SQL, MAXCMD, "DELETE FROM mimetype where mimetype_name = '%s';", mimetype_name);
  result =  PQexec(pgConn, SQL);
  if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__))
  {
    PQfinish(pgConn);
    exit(-1);
  }
  PQclear(result);
  memset(SQL, '\0', MAXCMD);
  snprintf(SQL, MAXCMD, "INSERT INTO mimetype (mimetype_name) VALUES ('%s');", mimetype_name);
  result =  PQexec(pgConn, SQL);
  if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__))
  {
    PQfinish(pgConn);
    exit(-1);
  }
  PQclear(result);
  MaxDBMime = 0;
  /** execute the tested function */
  DBLoadMime();
  /** select the record whose mimetype_name is application/octet-stream */
  memset(SQL, '\0', MAXCMD);
  snprintf(SQL, MAXCMD, "SELECT mimetype_name from mimetype where mimetype_name = ('%s');", mimetype_name);
  result =  PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    PQfinish(pgConn);
    exit(-1);
  }
  int count = PQntuples(result);
  PQclear(result);

  CU_ASSERT_EQUAL(MaxDBMime, count);
  /** delete the record whose mimetype_name is application/octet-stream from mimetype */
  memset(SQL, '\0', MAXCMD);
  snprintf(SQL, MAXCMD, "DELETE FROM mimetype where mimetype_name = '%s';", mimetype_name);
  result =  PQexec(pgConn, SQL);
  /** reset the environment, that is, remove the test record from mimetype */
  if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__))
  {
    PQfinish(pgConn);
    exit(-1);
  }
  MaxDBMime = 0;
  PQclear(result);
}
Example #11
/**
 * \brief check whether this account is valid
 *
 * \param[in]  user user name
 * \param[in]  password password
 * \param[out] userId will be set to the id of the user
 * \param[out] userPerm will be set to the permission level of the user
 *
 * \return 1: invalid;
 *         0: yes, valid;
 *        -1: failure
 */
int authentication(char *user, char *password, int *userId, int *userPerm)
{
  if (NULL == user || NULL == password)
  {
    return 1;
  }
  char SQL[MAXSQL] = {0};
  PGresult *result;
  char user_seed[myBUFSIZ] = {0};
  char pass_hash_valid[41] = {0};
  unsigned char pass_hash_actual_raw[21] = {0};
  char pass_hash_actual[41] = {0};

  /** get user_seed, user_pass, user_perm and user_pk for the specified user */
  snprintf(SQL,MAXSQL,"SELECT user_seed, user_pass, user_perm, user_pk from users where user_name=$1;");
  const char *values[1] = {user};
  int lengths[1] = {strlen(user)};
  int binary[1] = {0};
  result = PQexecParams(pgConn, SQL, 1, NULL, values, lengths, binary, 0);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }
  if (!PQntuples(result))
  {
    PQclear(result);
    return 1;
  }
  strcpy(user_seed, PQgetvalue(result, 0, 0));
  strcpy(pass_hash_valid, PQgetvalue(result, 0, 1));
  *userPerm = atoi(PQgetvalue(result, 0, 2));
  *userId = atoi(PQgetvalue(result, 0, 3));
  PQclear(result);
  if (user_seed[0] && pass_hash_valid[0])
  {
    strcat(user_seed, password);  // get the hash code on seed+pass
    SHA1((unsigned char *)user_seed, strlen(user_seed), pass_hash_actual_raw);
  }
  else
  {
    return -1;
  }
  int i = 0;
  char temp[256] = {0};
  for (i = 0; i < 20; i++)
  {
    snprintf(temp, 256, "%02x", pass_hash_actual_raw[i]);
    strcat(pass_hash_actual, temp);
  }
  return (strcmp(pass_hash_valid, pass_hash_actual) == 0) ? 0 : 1;
}
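
A minimal caller sketch, assuming the global pgConn used by these snippets and user/password strings supplied by the caller; it only maps the return codes documented in the comment block above (reportLogin is a made-up name).

/* Hypothetical caller: report the outcome of an authentication attempt. */
static void reportLogin(char *user, char *password)
{
  int userId = -1;
  int userPerm = -1;
  int rv = authentication(user, password, &userId, &userPerm);

  if (rv == 0)
    printf("valid account: user_pk=%d user_perm=%d\n", userId, userPerm);
  else if (rv == 1)
    printf("invalid user name or password\n");
  else
    printf("authentication failed because of a database error\n");
}
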
Example #12
/**
 * \brief Get a bucketpool_pk based on the bucketpool_name
 *
 * \param PGconn $pgConn  Database connection object
 * \param char $bucketpool_name
 *
 * \return active bucketpool_pk or 0 if error
****************************************************/
FUNCTION int getBucketpool_pk(PGconn *pgConn, char *bucketpool_name)
{
  char *fcnName = "getBucketpool";
  int bucketpool_pk=0;
  char sqlbuf[128];
  PGresult *result;

  /* Get the active bucketpool_pk with the highest version for this bucketpool_name. */
  sprintf(sqlbuf, "select bucketpool_pk from bucketpool where (bucketpool_name='%s') and (active='Y') order by version desc", 
          bucketpool_name);
  result = PQexec(pgConn, sqlbuf);
  if (fo_checkPQresult(pgConn, result, sqlbuf, fcnName, __LINE__)) return 0;
  if (PQntuples(result) > 0) bucketpool_pk = atoi(PQgetvalue(result, 0, 0));
  PQclear(result);
  return bucketpool_pk;
}
Example #13
/**
 @brief Check if table exists.
        Note, this assumes the database name is 'fossology'.

 @param pgConn database connection
 @param tableName

 @todo REMOVE hardcoded catalog name "fossology"
 @return 1 if table exists, 0 on error (which is logged) or if table does not exist.
****************************************************/
int fo_tableExists(PGconn *pgConn, char *tableName)
{
  char sql[256];
  PGresult *result;
  int  TabCount;

  snprintf(sql, sizeof(sql), 
           "select count(*) from information_schema.tables where table_catalog='%s' and table_name='%s'",
          PQdb(pgConn), tableName);
  result = PQexec(pgConn, sql);
  if (fo_checkPQresult(pgConn, result, sql, __FILE__, __LINE__)) return 0;

  TabCount = atol(PQgetvalue(result, 0, 0));

  PQclear(result);
  return(TabCount);
} /* fo_tableExists()  */
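
A usage sketch, assuming an open pgConn; the table scratch_copy is made up for illustration. It creates a working table only when it is not already present, reusing fo_checkPQcommand as in the other examples.

/* Sketch: create a (made-up) working table only if it does not exist yet. */
static int ensureScratchTable(PGconn *pgConn)
{
  char *sql = "CREATE TABLE scratch_copy (id integer, name text)";
  PGresult *result;

  if (fo_tableExists(pgConn, "scratch_copy")) return 1;   /* already there */

  result = PQexec(pgConn, sql);
  if (fo_checkPQcommand(pgConn, result, sql, __FILE__, __LINE__)) return 0;
  PQclear(result);
  return 1;
}
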
Example #14
/**
 * \brief initialize the DB connection and insert a test upload record
 */
int  DBInsertInit()
{
  char *ErrorBuf;
  char *upload_filename = "argmatch.c.gz";
  int upload_mode = 104;
  char *upload_origin = "argmatch.c.gz";
  char *tmp;

  pgConn = fo_dbconnect(DBConfFile, &ErrorBuf);
  if (!pgConn)
  {
    LOG_FATAL("Unable to connect to database");
    exit(-1);
  }
 
  /** insert upload info */
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"INSERT INTO upload (upload_filename,upload_mode,upload_origin) VALUES ('%s', %d, '%s');",upload_filename, upload_mode, upload_origin);
  result =  PQexec(pgConn, SQL);
  if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__))
  {
    printf("Insert upload information ERROR!\n");
    return (-1);
  }
  PQclear(result); 

  /* select the upload pk */
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"SELECT upload_pk FROM upload WHERE upload_filename = '%s';",
        upload_filename);
  result =  PQexec(pgConn, SQL);  /* SELECT */
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) return(-1);

  tmp = PQgetvalue(result,0,0);
  if(tmp)
  {
    Upload_Pk = strdup(tmp);  /* copy the value; PQclear below frees the result it points into */
    upload_pk = atol(tmp);
  }
  PQclear(result);
  return 0;
}
Example #15
/**
 * \brief Get the latest nomos agent_pk that has data for this
 * uploadtree.
 *
 * \param PGconn $pgConn  Database connection object
 * \param int    $upload_pk  
 *
 * \return nomos_agent_pk of the latest version of the nomos agent
 *        that has data for this upload. \n
 *        Or 0 if there is no license data available
 * 
 * NOTE: This function writes error to stdout
 */
FUNCTION int LatestNomosAgent(PGconn *pgConn, int upload_pk)
{
  char *fcnName = "LatestNomosAgent";
  char sql[512];
  PGresult *result;
  int  nomos_agent_pk = 0;

  /*** Find the latest enabled nomos agent_pk ***/
                         
  snprintf(sql, sizeof(sql),
          "select agent_fk from nomos_ars, agent \
              WHERE agent_pk=agent_fk and ars_success=true and upload_fk='%d' \
                    and agent_enabled=true order by agent_ts desc limit 1",
          upload_pk);
  result = PQexec(pgConn, sql);
  if (fo_checkPQresult(pgConn, result, sql, fcnName, __LINE__)) return 0;
  if (PQntuples(result) == 0) { PQclear(result); return 0; }
  nomos_agent_pk = atoi(PQgetvalue(result,0,0));
  PQclear(result);
  return nomos_agent_pk;
}
Example #16
/*********************************************************
  \brief Creates filenames from pfile_pk value

  \param pfileNum string containing pfile_pk value 
  \param pfileRepoName string with repo path and filename
  \param pfileRealName string with original filename
  \return 0 = OK, otherwise error code
 *********************************************************/
int pfileNumToNames(char *pfileNum, char *pfileRepoName, char *pfileRealName)
{
  char sqlSelect[256];
  PGresult *result;

  /* Attempt to locate the appropriate pFile_pk record */
  sprintf(sqlSelect, "SELECT pfile_sha1, pfile_md5, pfile_size, ufile_name FROM pfile, uploadtree WHERE pfile_fk = pfile_pk and pfile_pk = '%s'", pfileNum);
  result = PQexec(pgConn, sqlSelect);

  if (fo_checkPQresult(pgConn, result, sqlSelect, __FILE__, __LINE__)) return 3;  /* DB error; 0 would signal success */

  /* confirm a sane result set */
  if (PQntuples(result) == 0)
  {
    PQclear(result);

    /* Not found */
    fprintf(stderr, "Database does not contain pfile_pk: %s\n", pfileNum);
    return 1;
  }
  else if (PQntuples(result) != 1)
  {
    PQclear(result);

    /* Multiple matches */
    fprintf(stderr, "Database contains multiple pfile_pk: %s\n", pfileNum);
    return 2;
  }
  /* We've managed to locate the one and only pfile_pk record. Build the filePath string */
  /* Concatenate first row fields 0, 1 and 2 */
  sprintf(pfileRepoName, "%s.%s.%s", PQgetvalue(result, 0, 0), PQgetvalue(result, 0, 1), PQgetvalue(result, 0, 2));
  /* and extract the actual filename from field 4 - uploadtree.ufile_name */
  sprintf(pfileRealName, "%s", PQgetvalue(result, 0, 3));

//  fprintf(stderr, "fileName is:%s\n", pFileName);
  PQclear(result);
  return 0;
}
Example #17
/**
 * \brief List every upload ID.
 *
 * \param userId user id
 * \param userPerm permission level the user has
 * \return 0 on success; -1 on failure
 */
int listUploads (int userId, int userPerm)
{
  int Row,maxRow;
  long NewPid;
  PGresult *result;
  int rc;
  char *SQL = "SELECT upload_pk,upload_desc,upload_filename FROM upload ORDER BY upload_pk;";
  printf("# Uploads\n");
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    exitNow(-1);
  }

  /* list each value */
  maxRow = PQntuples(result);
  for(Row=0; Row < maxRow; Row++)
  {
    NewPid = atol(PQgetvalue(result,Row,0));
    rc = check_read_permission_upload(NewPid, userId, userPerm);
    if (rc < 0)
    {
      PQclear(result);
      return rc;
    }
    if (NewPid >= 0 && (userPerm == PERM_ADMIN || rc  == 0))
    {
      char *S;
      printf("%ld :: %s",NewPid,PQgetvalue(result,Row,2));
      S = PQgetvalue(result,Row,1);
      if (S && S[0]) printf(" (%s)",S);
      printf("\n");
    }
  }
  PQclear(result);
  return 0;
} /* listUploads() */
Example #18
/**
 * \brief check if the folder can be deleted, that is, the user has
 * the permission to delete this folder
 *
 * \param folder_id folder id
 * \param userId user id
 * \param userPerm permission level the user has
 *
 * \return 0: yes, can be deleted;
 *         1: can not be deleted;
 *        -1: failure;
 */
int check_write_permission_folder(long folder_id, int userId, int userPerm)
{
  char SQL[MAXSQL];
  PGresult *result;
  int count = 0;

  if (userPerm < PERM_WRITE)
  {
    return 1; // can not be deleted
  }

  snprintf(SQL,MAXSQL,"SELECT count(*) FROM folder JOIN users ON (users.user_pk = folder.user_fk OR users.user_perm = 10) WHERE folder_pk = %ld AND users.user_pk = %d;",folder_id,userId);
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }
  count = atoi(PQgetvalue(result,0,0));
  PQclear(result);
  if(count == 0)
  {
    return 1; // can not be deleted
  }
  return 0; // can be deleted
}
Example #19
/**
 * \brief Initialize the bucket definition list.
 * If an error occurred, write the error to stdout.
 *
 * \param PGconn $pgConn  Database connection object
 * \param int $bucketpool_pk
 * \param cacheroot_t $pcroot  license cache root
 *
 * \return an array of bucket definitions (in eval order)
 * or 0 if error.
 */
FUNCTION pbucketdef_t initBuckets(PGconn *pgConn, int bucketpool_pk, cacheroot_t *pcroot)
{
  char *fcnName = "initBuckets";
  char sqlbuf[256];
  char filepath[256];
  char hostname[256];
  PGresult *result;
  pbucketdef_t bucketDefList = 0;
  int  numRows, rowNum;
  int  rv, numErrors=0;
  struct stat statbuf;

  /* reasonable input validation  */
  if ((!pgConn) || (!bucketpool_pk)) 
  {
    printf("ERROR: %s.%s.%d Invalid input pgConn: %lx, bucketpool_pk: %d.\n",
            __FILE__, fcnName, __LINE__, (unsigned long)pgConn, bucketpool_pk);
    return 0;
  }

  /* get bucket defs from db */
  sprintf(sqlbuf, "select bucket_pk, bucket_type, bucket_regex, bucket_filename, stopon, bucket_name, applies_to from bucket_def where bucketpool_fk=%d order by bucket_evalorder asc", bucketpool_pk);
  result = PQexec(pgConn, sqlbuf);
  if (fo_checkPQresult(pgConn, result, sqlbuf, fcnName, __LINE__)) return 0;
  numRows = PQntuples(result);
  if (numRows == 0) /* no bucket recs for pool?  return error */
  {
    printf("ERROR: %s.%s.%d No bucket defs for pool %d.\n",
            __FILE__, fcnName, __LINE__, bucketpool_pk);
    PQclear(result);
    return 0;
  }

  bucketDefList = calloc(numRows+1, sizeof(bucketdef_t));
  if (bucketDefList == 0)
  {
    printf("ERROR: %s.%s.%d No memory to allocate %d bucket defs.\n",
            __FILE__, fcnName, __LINE__, numRows);
    return 0;
  }

  /* put each db bucket def into bucketDefList in eval order */
  for (rowNum=0; rowNum<numRows; rowNum++)
  {
    bucketDefList[rowNum].bucket_pk = atoi(PQgetvalue(result, rowNum, 0));
    bucketDefList[rowNum].bucket_type = atoi(PQgetvalue(result, rowNum, 1));
    bucketDefList[rowNum].bucketpool_pk = bucketpool_pk;

    /* compile regex if type 3 (REGEX) */
    if (bucketDefList[rowNum].bucket_type == 3)
    {
      rv = regcomp(&bucketDefList[rowNum].compRegex, PQgetvalue(result, rowNum, 2), 
                   REG_NOSUB | REG_ICASE | REG_EXTENDED);
      if (rv != 0)
      {
        printf("ERROR: %s.%s.%d Invalid regular expression for bucketpool_pk: %d, bucket: %s\n",
               __FILE__, fcnName, __LINE__, bucketpool_pk, PQgetvalue(result, rowNum, 5));
        numErrors++;
      }
      bucketDefList[rowNum].regex = strdup(PQgetvalue(result, rowNum, 2));
    }

    bucketDefList[rowNum].dataFilename = strdup(PQgetvalue(result, rowNum, 3));

    /* verify that external file dataFilename exists */
    if (strlen(bucketDefList[rowNum].dataFilename) > 0)
    {
      snprintf(filepath, sizeof(filepath), "%s/bucketpools/%d/%s",
        PROJECTSTATEDIR, bucketpool_pk, bucketDefList[rowNum].dataFilename);
      if (stat(filepath, &statbuf) == -1)
      {
        hostname[0] = 0;
        gethostname(hostname, sizeof(hostname));
        printf("ERROR: %s.%s.%d File: %s is missing on host: %s.  bucketpool_pk: %d, bucket: %s\n",
               __FILE__, fcnName, __LINE__, filepath, hostname, bucketpool_pk, PQgetvalue(result, rowNum, 5));
        numErrors++;
      }
    }

    /* MATCH_EVERY */
    if (bucketDefList[rowNum].bucket_type == 1)
      bucketDefList[rowNum].match_every = getMatchEvery(pgConn, bucketpool_pk, bucketDefList[rowNum].dataFilename, pcroot);

    /* MATCH_ONLY */
    if (bucketDefList[rowNum].bucket_type == 2)
    {
      bucketDefList[rowNum].match_only = getMatchOnly(pgConn, bucketpool_pk, bucketDefList[rowNum].dataFilename, pcroot);
    }

    /* REGEX-FILE */
    if (bucketDefList[rowNum].bucket_type == 5)
    {
      bucketDefList[rowNum].regex_row = getRegexFile(pgConn, bucketpool_pk, bucketDefList[rowNum].dataFilename, pcroot);
    }

    bucketDefList[rowNum].stopon = *PQgetvalue(result, rowNum, 4);
    bucketDefList[rowNum].bucket_name = strdup(PQgetvalue(result, rowNum, 5));
    bucketDefList[rowNum].applies_to = *PQgetvalue(result, rowNum, 6);
  }
  PQclear(result);
  if (numErrors) return 0;

  if (debug)
  {
    for (rowNum=0; rowNum<numRows; rowNum++)
    {
      printf("\nbucket_pk[%d] = %d\n", rowNum, bucketDefList[rowNum].bucket_pk);
      printf("bucket_name[%d] = %s\n", rowNum, bucketDefList[rowNum].bucket_name);
      printf("bucket_type[%d] = %d\n", rowNum, bucketDefList[rowNum].bucket_type);
      printf("dataFilename[%d] = %s\n", rowNum, bucketDefList[rowNum].dataFilename);
      printf("stopon[%d] = %c\n", rowNum, bucketDefList[rowNum].stopon);
      printf("applies_to[%d] = %c\n", rowNum, bucketDefList[rowNum].applies_to);
      printf("nomos_agent_pk[%d] = %d\n", rowNum, bucketDefList[rowNum].nomos_agent_pk);
      printf("bucket_agent_pk[%d] = %d\n", rowNum, bucketDefList[rowNum].bucket_agent_pk);
      printf("regex[%d] = %s\n", rowNum, bucketDefList[rowNum].regex);
    }
  }

  return bucketDefList;
}
Example #20
/**
 \brief Add a new license to license_ref table

 Adds a license to license_ref table.

 @param  licenseName Name of license

 @return rf_pk for success, 0 for failure
 */
FUNCTION long add2license_ref(char *licenseName)
{

  PGresult *result;
  char query[myBUFSIZ];
  char insert[myBUFSIZ];
  char escLicName[myBUFSIZ];
  char *specialLicenseText;
  long rf_pk;

  int len;
  int error;
  int numRows;

  // escape the name
  len = strlen(licenseName);
  PQescapeStringConn(gl.pgConn, escLicName, licenseName, len, &error);
  if (error)
  LOG_WARNING("Does license name %s have multibyte encoding?", licenseName)

  /* verify the license is not already in the table */
  sprintf(query, "SELECT rf_pk FROM " LICENSE_REF_TABLE " where rf_shortname='%s'", escLicName);
  result = PQexec(gl.pgConn, query);
  if (fo_checkPQresult(gl.pgConn, result, query, __FILE__, __LINE__))
    return 0;
  numRows = PQntuples(result);
  if (numRows)
  {
    rf_pk = atol(PQgetvalue(result, 0, 0));
    PQclear(result);
    return rf_pk;
  }
  PQclear(result);

  /* Insert the new license */
  specialLicenseText = "License by Nomos.";

  sprintf(insert, "insert into license_ref(rf_shortname, rf_text, rf_detector_type) values('%s', '%s', 2)", escLicName,
      specialLicenseText);
  result = PQexec(gl.pgConn, insert);
  // ignore duplicate constraint failure (23505), report others
  if ((result == 0)
      || ((PQresultStatus(result) != PGRES_COMMAND_OK)
          && (strncmp(PG_ERRCODE_UNIQUE_VIOLATION, PQresultErrorField(result, PG_DIAG_SQLSTATE), 5))))
  {
    printf("ERROR: %s(%d): Nomos failed to add a new license. %s/n: %s/n",
    __FILE__, __LINE__, PQresultErrorMessage(result), insert);
    PQclear(result);
    return (0);
  }
  PQclear(result);

  /* retrieve the new rf_pk */
  result = PQexec(gl.pgConn, query);
  if (fo_checkPQresult(gl.pgConn, result, query, __FILE__, __LINE__))
    return 0;
  numRows = PQntuples(result);
  if (numRows)
    rf_pk = atol(PQgetvalue(result, 0, 0));
  else
  {
    printf("ERROR: %s:%s:%d Just inserted value is missing. On: %s", __FILE__, "add2license_ref()", __LINE__, query);
    PQclear(result);
    return (0);
  }
  PQclear(result);

  return (rf_pk);
}
Example #21
/**
 * \brief Draw folder tree.
 *
 *   if DelFlag is set, then all child uploads are
 *   deleted and the folders are deleted.
 *
 * \param Parent the parent folder id
 * \param Depth
 * \param row grandparent (used to unlink if multiple grandparents)
 * \param DelFlag 0=no del, 1=del if unique parent, 2=del unconditional
 * \param userId
 * \param userPerm permission level the user has
 *
 * \return 0: success;
 *         1: fail;
 *        -1: failure
 *
 */
int listFoldersRecurse (long Parent, int Depth, long Row, int DelFlag, int userId, int userPerm)
{
  int r, i, rc, maxRow;
  int count, resultUploadCount;
  long Fid;
  char *Desc;
  char SQL[MAXSQL], SQLUpload[MAXSQL];
  char SQLFolder[MAXSQLFolder];
  PGresult *result, *resultUpload, *resultFolder;

  rc = check_write_permission_folder(Parent, userId, userPerm);
  if(rc < 0)
  {
    return rc;
  }
  if(DelFlag && rc > 0){
    return 1;
  }

  snprintf(SQLFolder, MAXSQLFolder,"SELECT COUNT(*) FROM folderlist WHERE folder_pk=%ld",Parent);
  resultFolder = PQexec(pgConn, SQLFolder);
  count= atoi(PQgetvalue(resultFolder,0,0));
  PQclear(resultFolder);

  /* Find all folders with this parent and recurse, but don't show uploads, if they also exist in other directories */
  snprintf(SQL,MAXSQL,"SELECT folder_pk,foldercontents_mode,name,description,upload_pk,pfile_fk FROM folderlist WHERE parent=%ld"
                      " ORDER BY name,parent,folder_pk ", Parent);
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__))
  {
    return -1;
  }
  maxRow = PQntuples(result);
  for(r=0; r < maxRow; r++)
  {
    if (atol(PQgetvalue(result,r,0)) == Parent)
    {
      continue;
    }

    Fid = atol(PQgetvalue(result,r,0));
    if (Fid != 0)
    {
      if (!DelFlag)
      {
        for(i=0; i<Depth; i++)
        {
          fputs("   ",stdout);
        }
        printf("%4ld :: %s",Fid,PQgetvalue(result,r,2));
        Desc = PQgetvalue(result,r,3);
        if (Desc && Desc[0])
        {
          printf(" (%s)",Desc);
        }
        printf("\n");
      }
      rc = listFoldersRecurse(Fid,Depth+1,Parent,DelFlag,userId,userPerm);
      if (rc < 0)
      {
        if (DelFlag)
        {
          printf("Deleting the folder failed.");
        }
        return 1;
      }
    }
    else
    {
      if (DelFlag==1 && unlinkContent(Parent,Row,1,userId,userPerm)==0)
      {
        continue;
      }
      if (rc < 0)
      {
        return rc;
      }
      if (DelFlag)
      {
        snprintf(SQLUpload, MAXSQL,"SELECT COUNT(*) FROM folderlist WHERE pfile_fk=%ld", atol(PQgetvalue(result,r,5)));
        resultUpload = PQexec(pgConn, SQLUpload);
        resultUploadCount = atoi(PQgetvalue(resultUpload,0,0));
        PQclear(resultUpload);
        if(count < 2 && resultUploadCount < 2)
        {
          rc = deleteUpload(atol(PQgetvalue(result,r,4)),userId, userPerm);
          if (rc < 0)
          {
            return rc;
          }
          if (rc != 0)
          {
            printf("Deleting the folder failed since it contains uploads you can't delete.");
            return rc;
          }
        }
        else{
          rc = unlinkContent(atol(PQgetvalue(result,r,4)),Parent,2,userId,userPerm);
          if(rc < 0){
            return rc;
          }
        }
      }
      else
      {
        rc = check_read_permission_upload(atol(PQgetvalue(result,r,4)),userId,userPerm);
        if (rc < 0)
        {
          return rc;
        }
        if (rc == 0)
        {
          for(i=0; i<Depth; i++)
          {
            fputs("   ",stdout);
          }
          printf("%4s :: Contains: %s\n","--",PQgetvalue(result,r,2));
        }
      }
    }
  }
  PQclear(result);

  switch(Parent)
  {
    case 1: /* skip default parent */
      if (DelFlag != 0)
      {
        printf("INFO: Default folder not deleted.\n");
      }
      break;
    case 0: /* it's an upload */
      break;
    default:  /* it's a folder */
      if (DelFlag == 0)
      {
        break;
      }
      printf("INFO: folder id=%ld will be deleted with flag %d\n",Parent,DelFlag);
      if (DelFlag==1)
      {
        rc = unlinkContent(Parent,Row,1,userId,userPerm);
        if (rc == 0)
        {
          break;
        }
        if (rc < 0)
        {
          return rc;
        }
      }
      if(Row > 0)
        snprintf(SQL,MAXSQL,"DELETE FROM foldercontents WHERE foldercontents_mode=1 AND parent_fk=%ld AND child_id=%ld",Row,Parent);
      else
        snprintf(SQL,MAXSQL,"DELETE FROM foldercontents WHERE foldercontents_mode=1 AND child_id=%ld",Parent);
      if (Test)
      {
        printf("TEST: %s\n",SQL);
      }
      else
      {
        PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);
      }
      if(Row > 0)
        snprintf(SQL,MAXSQL,"DELETE FROM folder f USING foldercontents fc WHERE  f.folder_pk = fc.child_id AND fc.parent_fk='%ld' AND f.folder_pk = '%ld';",Row,Parent);
      else
        snprintf(SQL,MAXSQL,"DELETE FROM folder WHERE folder_pk = '%ld';",Parent);
      if (Test)
      {
        printf("TEST: %s\n",SQL);
      }
      else
      {
        PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);
      }
  } /* switch() */

  return 0; /* success */
} /* listFoldersRecurse() */
Example #22
/****************  main  *******************/
int main(int argc, char** argv)
{
  PGconn* pgConn;
  PGresult* result;
  psqlCopy_t pCopy;
  char* TestTable = "TestsqlCopy";
  char col_vc[40] = "This is \n\r column vc[40] 123456789\r";
  char* col_text;
  char* DataBuf;
  int datasize;
  char sql[2048];
  int NumColumns = 3;
  int CopyBufSize;
  int RowsToTest;
  int NumTextBytes;
  int RowNum;
  clock_t StartTime, EndTime;
  char* DBConfFile = NULL;  /* use default Db.conf */
  char* ErrorBuf;

  if (argc != 4)
  {
    printf("Usage: %s RowsToTest NumTextBytes CopyDataBufferSize\n", argv[0]);
    exit(-1);
  }

  /* first argument is the number of rows to test, 
   * the second is the number of bytes to use for col_text
   * third is the Copy data buffer size
   */
  RowsToTest = atoi(argv[1]);
  NumTextBytes = atoi(argv[2]);
  CopyBufSize = atoi(argv[3]);

  /* Populate test data */
  col_text = GetTextCol(NumTextBytes);
  datasize = NumTextBytes + 8 + 40 + 1;
  DataBuf = calloc(datasize, sizeof(char));
  if (!DataBuf)
  {
    ERROR_RETURN("Allocating test data buffer failed.")
    exit(-2);
  }

  pgConn = fo_dbconnect(DBConfFile, &ErrorBuf);

  /* Create a test table to populate */
  snprintf(sql, sizeof(sql), "create table %s (col_int integer, col_text text, col_vc varchar(40))", TestTable);
  result = PQexec(pgConn, sql);
  fo_checkPQcommand(pgConn, result, sql, __FILE__, __LINE__);

  /* Start timer */
  StartTime = clock();

  /* Create the pCopy */
  pCopy = fo_sqlCopyCreate(pgConn, TestTable, CopyBufSize, NumColumns,
    "col_int", "col_text", "col_vc");
  if (!pCopy) exit(1);  /* CopyCreate prints errors to stdout */

  /* Add data */
  for (RowNum = 0; RowNum < RowsToTest; RowNum++)
  {
    snprintf(DataBuf, datasize, "%d\t%s\t%s\n", RowNum, col_text, col_vc);
    fo_sqlCopyAdd(pCopy, DataBuf);
  }
  free(col_text);

  /* Destroy - flushes remaining data and frees */
  fo_sqlCopyDestroy(pCopy, 1);

  /* Print run time for the load (whole Create/Add/Destroy cycle). */
  EndTime = clock();
  printf("%.6f Seconds to load.\n", ((double) (EndTime - StartTime)) / CLOCKS_PER_SEC);

  /* Verify that the right number of records were loaded */
  snprintf(sql, sizeof(sql), "select count(*) from %s", TestTable);
  result = PQexec(pgConn, sql);
  fo_checkPQresult(pgConn, result, sql, __FILE__, __LINE__);
  printf("%d records inserted, %d expected\n",
    atoi(PQgetvalue(result, 0, 0)),
    RowsToTest);
  PQclear(result);

  /* Remove the test table */
/*
  snprintf(sql, sizeof(sql), "drop table %s", TestTable);
  result = PQexec(pgConn, sql);
  fo_checkPQcommand(pgConn, result, sql, __FILE__, __LINE__);
*/

  PQfinish(pgConn);
  return (0);
}
Example #23
/**
 * \brief Given an upload ID, delete it.
 *
 * \param uploadId the upload id
 * \param userId
 * \param userPerm permission level the user has
 *
 * \return 0: yes, it is deleted;
 *         1: can not be deleted;
 *        -1: failure;
 *        -2: does not exist
 */
int deleteUpload (long uploadId, int userId, int userPerm)
{
  char *S;
  int Row,maxRow;
  char tempTable[256];
  PGresult *result, *pfileResult;
  char SQL[MAXSQL], desc[myBUFSIZ];

  int permission_upload = check_write_permission_upload(uploadId, userId, userPerm);
  if(0 != permission_upload) {
    return permission_upload;
  }

  snprintf(tempTable,sizeof(tempTable),"DelUp_%ld_pfile",uploadId);
  snprintf(SQL,MAXSQL,"DROP TABLE IF EXISTS %s;",tempTable);
  PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);

  snprintf(desc, myBUFSIZ, "Deleting upload %ld",uploadId);
  PQexecCheckClear(desc, "SET statement_timeout = 0;", __FILE__, __LINE__);
  PQexecCheckClear(NULL, "BEGIN;", __FILE__, __LINE__);

  /* Delete everything that impacts the UI */
  if (!Test) {
    /* The UI depends on uploadtree and folders for navigation.
     Delete them now to block timeouts from the UI. */
    PQexecCheckClear(NULL, "COMMIT;", __FILE__, __LINE__);
  }

  /* Begin complicated stuff */
  /* Get the list of pfiles to delete */
  /* These are all pfiles in the upload_fk that only appear once. */
  snprintf(SQL,MAXSQL,"SELECT DISTINCT pfile_pk,pfile_sha1 || '.' || pfile_md5 || '.' || pfile_size AS pfile INTO %s FROM uploadtree INNER JOIN pfile ON upload_fk = %ld AND pfile_fk = pfile_pk;",tempTable,uploadId);
  PQexecCheckClear("Getting list of pfiles to delete", SQL, __FILE__, __LINE__);

  /* Remove pfiles which are reused by other uploads */
  snprintf(SQL, MAXSQL, "DELETE FROM %s WHERE pfile_pk IN (SELECT pfile_pk FROM %s INNER JOIN uploadtree ON pfile_pk = pfile_fk WHERE upload_fk != %ld)", tempTable, tempTable, uploadId);
  PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);

  if (Verbose) {
    snprintf(SQL,MAXSQL,"SELECT COUNT(*) FROM %s;",tempTable);
    result = PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) {
      return -1;
    }
    printf("# Created pfile table %s with %ld entries\n", tempTable, atol(PQgetvalue(result,0,0)));
    PQclear(result);
  }

  /* Now delete the actual pfiles from the repository before removing them from the DB. */
  /* Get the file listing -- needed for deleting pfiles from the repository. */
  snprintf(SQL,MAXSQL,"SELECT * FROM %s ORDER BY pfile_pk;",tempTable);
  pfileResult = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, pfileResult, SQL, __FILE__, __LINE__)) {
    return -1;
  }

  if (Test <= 1) {
    maxRow = PQntuples(pfileResult);
    for(Row=0; Row<maxRow; Row++) {
      S = PQgetvalue(pfileResult,Row,1); /* sha1.md5.len */
      if (fo_RepExist("files",S)) {
        if (Test) {
          printf("TEST: Delete %s %s\n","files",S);
        } else {
          fo_RepRemove("files",S);
        }
      }
      if (fo_RepExist("gold",S)) {
        if (Test) {
          printf("TEST: Delete %s %s\n","gold",S);
        } else {
          fo_RepRemove("gold",S);
        }
      }
      fo_scheduler_heart(1);
    }
  }
  PQclear(pfileResult);

  /*
   This begins the slow part that locks the DB.
   The problem is, we don't want to lock a critical row,
   otherwise the scheduler will lock and/or fail.
  */
  if (!Test) {
    PQexecCheckClear(NULL, "BEGIN;", __FILE__, __LINE__);
  }
  /* Delete the upload from the folder-contents table */
  snprintf(SQL,MAXSQL,"DELETE FROM foldercontents WHERE (foldercontents_mode & 2) != 0 AND child_id = %ld;",uploadId);
  PQexecCheckClear("Deleting foldercontents", SQL, __FILE__, __LINE__);

  /* Deleting the actual upload contents*/
  /* Delete the bucket_container record as it can't be cascade-deleted with the upload table */
  snprintf(SQL,MAXSQL,"DELETE FROM bucket_container USING uploadtree WHERE uploadtree_fk = uploadtree_pk AND upload_fk = %ld;",uploadId);
  PQexecCheckClear("Deleting bucket_container", SQL, __FILE__, __LINE__);

  /* Delete the tag_uploadtree record as it can't be cascade-deleted with the upload table */
  snprintf(SQL,MAXSQL,"DELETE FROM tag_uploadtree USING uploadtree WHERE uploadtree_fk = uploadtree_pk AND upload_fk = %ld;",uploadId);
  PQexecCheckClear("Deleting tag_uploadtree", SQL, __FILE__, __LINE__);

  /* Delete uploadtree_nnn table */
  char uploadtree_tablename[1024];
  snprintf(SQL,MAXSQL,"SELECT uploadtree_tablename FROM upload WHERE upload_pk = %ld;",uploadId);
  result = PQexec(pgConn, SQL);
  if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) {
    return -1;
  }
  if (PQntuples(result)) {
    strcpy(uploadtree_tablename, PQgetvalue(result, 0, 0));
    PQclear(result);
    if (strcasecmp(uploadtree_tablename,"uploadtree_a")) {
      snprintf(SQL,MAXSQL,"DROP TABLE %s;", uploadtree_tablename);
      PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);
    }
  }

  printfInCaseOfVerbosity("Deleting license decisions for upload %ld\n",uploadId);
  /* delete from clearing_decision_event table. */
  snprintf(SQL, MAXSQL, "DELETE FROM clearing_decision_event USING clearing_event WHERE clearing_decision_event.clearing_event_fk = clearing_event.clearing_event_pk AND clearing_event.uploadtree_fk IN (SELECT uploadtree_pk FROM uploadtree INNER JOIN %s ON uploadtree.pfile_fk = %s.pfile_pk WHERE upload_fk = %ld);", tempTable, tempTable, uploadId);
  PQexecCheckClear("Deleting from clearing_decision_event", SQL, __FILE__, __LINE__);

  /* delete from clearing_event table. */
  snprintf(SQL, MAXSQL, "DELETE FROM clearing_event WHERE uploadtree_fk IN (SELECT uploadtree_pk FROM uploadtree INNER JOIN %s ON uploadtree.pfile_fk = %s.pfile_pk WHERE upload_fk = %ld);", tempTable, tempTable, uploadId);
  PQexecCheckClear("Deleting from clearing_event", SQL, __FILE__, __LINE__);

  /* delete from uploadtree table. */
  snprintf(SQL, MAXSQL, "DELETE FROM uploadtree WHERE upload_fk = %ld;", uploadId);
  PQexecCheckClear("Deleting from uploadtree", SQL, __FILE__, __LINE__);

  /* delete from pfile is SLOW due to constraint checking. Do it separately. */
  snprintf(SQL,MAXSQL,"DELETE FROM pfile USING %s WHERE pfile.pfile_pk = %s.pfile_pk;",tempTable,tempTable);
  PQexecCheckClear("Deleting from pfile", SQL, __FILE__, __LINE__);

  snprintf(SQL,MAXSQL,"DROP TABLE %s;",tempTable);
  PQexecCheckClear(NULL, SQL, __FILE__, __LINE__);

  PQexecCheckClear(NULL, "SET statement_timeout = 120000;", __FILE__, __LINE__);

  printfInCaseOfVerbosity("Deleted upload %ld from DB, now doing repository.\n",uploadId);

  if (Test) {
    PQexecCheckClear(NULL, "ROLLBACK;", __FILE__, __LINE__);
  } else {
    PQexecCheckClear(NULL, "COMMIT;", __FILE__, __LINE__);
  }

  printfInCaseOfVerbosity("Deleted upload %ld\n",uploadId);

  return 0; /* success */
} /* deleteUpload() */
Example #24
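/**
 * \brief main function for the ununpack agent
 *
 * Connects to the scheduler, parses the command line options, unpacks the
 * files given on the command line or the upload fetched from the repository,
 * and records the results in the uploadtree and ars tables.
 */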
int	main(int argc, char *argv[])
{
  int Pid;
  int c;
  int rvExist1=0, rvExist2=0;
  PGresult *result;
  char *NewDir=".";
  char *AgentName = "ununpack";
  char *AgentARSName = "ununpack_ars";
  char *agent_desc = "Unpacks archives (iso, tar, etc)";
  int   Recurse=0;
  int   ars_pk = 0;
  int   user_pk = 0;
  long  Pfile_size = 0;
  char *ListOutName=NULL;
  char *Fname = NULL;
  char *FnameCheck = NULL;
  char *COMMIT_HASH;
  char *VERSION;
  char agent_rev[PATH_MAX];
  struct stat Stat;

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);

  while((c = getopt(argc,argv,"ACc:d:FfHhL:m:PQiqRr:T:t:U:VvXx")) != -1)
  {
    switch(c)
    {
      case 'A':	SetContainerArtifact=0; break;
      case 'C':	ForceContinue=1; break;
      case 'c':	break;  /* handled by fo_scheduler_connect() */
      case 'd':	
        /* if there is a %U in the path, substitute a unique ID */
        NewDir=PathCheck(optarg);
        break;
      case 'F':	UseRepository=1; break;
      case 'f':	ForceDuplicate=1; break;
      case 'L':	ListOutName=optarg; break;
      case 'm':
        MaxThread = atoi(optarg);
        if (MaxThread < 1) MaxThread=1;
        break;
      case 'P':	PruneFiles=1; break;
      case 'R':	Recurse=-1; break;
      case 'r':	Recurse=atoi(optarg); break;
      case 'i':
        if (!IsExe("dpkg-source",Quiet))
          LOG_WARNING("dpkg-source is not available on this system.  This means that debian source packages will NOT be unpacked.");
        SafeExit(0);
        break; /* never reached */
      case 'Q':
        UseRepository=1;

        user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

        /* Get the upload_pk from the scheduler */
        if((Upload_Pk = fo_scheduler_next()) == NULL) SafeExit(0);
        break;
      case 'q':	Quiet=1; break;
      case 'T':
        memset(REP_GOLD,0,sizeof(REP_GOLD));
        strncpy(REP_GOLD,optarg,sizeof(REP_GOLD)-1);
        break;
      case 't':
        memset(REP_FILES,0,sizeof(REP_FILES));
        strncpy(REP_FILES,optarg,sizeof(REP_FILES)-1);
        break;
      case 'U':	
        UseRepository = 1;
        Recurse = -1;
        Upload_Pk = optarg; 
        break;
      case 'V': printf("%s", BuildVersion);SafeExit(0);
      case 'v':	Verbose++; break;
      case 'X':	UnlinkSource=1; break;
      case 'x':	UnlinkAll=1; break;
      default:
        Usage(argv[0], BuildVersion);
        SafeExit(25);
    }
  }

  /* Open DB and Initialize CMD table */
  if (UseRepository) 
  {
    /* Check Permissions */
    if (GetUploadPerm(pgConn, atoi(Upload_Pk), user_pk) < PERM_WRITE)
    {
      LOG_ERROR("You have no update permissions on upload %s", Upload_Pk);
      SafeExit(100);
    }
        
    COMMIT_HASH = fo_sysconfig(AgentName, "COMMIT_HASH");
    VERSION = fo_sysconfig(AgentName, "VERSION");
    sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
    /* Get the unpack agent key */
    agent_pk = fo_GetAgentKey(pgConn, AgentName, atoi(Upload_Pk), agent_rev,agent_desc);

    InitCmd();

    /* Make sure ars table exists */
    if (!fo_CreateARSTable(pgConn, AgentARSName)) SafeExit(0);

    /* Has this user previously unpacked this upload_pk successfully?
     *    In this case we are done.  No new ars record is needed since no
     *    processing is initiated.
     * The unpack version is ignored.
     */
    snprintf(SQL,MAXSQL,
        "SELECT ars_pk from %s where upload_fk='%s' and ars_success=TRUE",
           AgentARSName, Upload_Pk);
    result =  PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(101);

    if (PQntuples(result) > 0) /* if there is a value */
    {  
      PQclear(result);
      LOG_WARNING("Upload_pk %s, has already been unpacked.  No further action required", 
              Upload_Pk)
      SafeExit(0);
    }
    PQclear(result);

    /* write the unpack_ars start record */
    ars_pk = fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 0);

    /* Get Pfile path and Pfile_Pk, from Upload_Pk */
  snprintf(SQL,MAXSQL,
        "SELECT pfile.pfile_sha1 || '.' || pfile.pfile_md5 || '.' || pfile.pfile_size AS pfile, pfile_fk, pfile_size FROM upload INNER JOIN pfile ON upload.pfile_fk = pfile.pfile_pk WHERE upload.upload_pk = '%s'", 
           Upload_Pk);
    result =  PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(102);

    if (PQntuples(result) > 0) /* if there is a value */
    {  
      Pfile = strdup(PQgetvalue(result,0,0));
      Pfile_Pk = strdup(PQgetvalue(result,0,1));
      Pfile_size = atol(PQgetvalue(result, 0, 2));
      if (Pfile_size == 0)
      {  
        PQclear(result);
        LOG_WARNING("Uploaded file (Upload_pk %s), is zero length.  There is nothing to unpack.", 
                      Upload_Pk)
        SafeExit(0);
      }

      PQclear(result);
    }

    // Determine if uploadtree records should go into a separate table.
    // If the input file size is > 500MB, then create a separate uploadtree_{upload_pk} table
    // that inherits from the master uploadtree table.
    // Save uploadtree_tablename, it will get written to upload.uploadtree_tablename later.
    if (Pfile_size > 500000000)
    {
      sprintf(uploadtree_tablename, "uploadtree_%s", Upload_Pk);
      if (!fo_tableExists(pgConn, uploadtree_tablename))
      {
        snprintf(SQL,MAXSQL,"CREATE TABLE %s (LIKE uploadtree INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES); ALTER TABLE %s ADD CONSTRAINT %s CHECK (upload_fk=%s); ALTER TABLE %s INHERIT uploadtree", 
               uploadtree_tablename, uploadtree_tablename, uploadtree_tablename, Upload_Pk, uploadtree_tablename);
        PQsetNoticeProcessor(pgConn, SQLNoticeProcessor, SQL);  // ignore notice about implicit primary key index creation
        result =  PQexec(pgConn, SQL);
        // Ignore postgres notice about creating an implicit index
        if (PQresultStatus(result) != PGRES_NONFATAL_ERROR)
          if (fo_checkPQcommand(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(103);
        PQclear(result);
      }
    }
    else
      strcpy(uploadtree_tablename, "uploadtree_a");

  }

  CheckCommands(Quiet);
  if (NewDir) MkDir(NewDir);
  if (Verbose) { fclose(stderr) ; stderr=stdout; } /* don't interlace! */
  if (ListOutName != NULL)
  {
    if ((ListOutName[0]=='-') && (ListOutName[1]=='\0'))
      ListOutFile=stdout;
    else ListOutFile = fopen(ListOutName,"w");
    if (!ListOutFile)
    {
      LOG_ERROR("pfile %s Unable to write to %s\n",Pfile_Pk,ListOutName)
      SafeExit(104);
    }
    else
    {
      /* Start the file */
      fputs("<xml tool=\"ununpack\" ",ListOutFile);
      fputs("version=\"",ListOutFile);
      fputs(Version,ListOutFile);
      fputs("\" ",ListOutFile);
      fputs("compiled_date=\"",ListOutFile);
      fputs(__DATE__,ListOutFile);
      fputs(" ",ListOutFile);
      fputs(__TIME__,ListOutFile);
      fputs("\"",ListOutFile);
      fputs(">\n",ListOutFile);
    }
    /* Problem: When parallel processing, the XML may be generated out
	   of order.  Solution?  When using XML, only use 1 thread. */
    MaxThread=1;
  }

  // Set ReunpackSwitch if the uploadtree records are missing from the database.
  if (!ReunpackSwitch && UseRepository)
  {
    snprintf(SQL,MAXSQL,"SELECT uploadtree_pk FROM uploadtree WHERE upload_fk=%s limit 1;",Upload_Pk);
    result =  PQexec(pgConn, SQL);
    if (fo_checkPQresult(pgConn, result, SQL, __FILE__, __LINE__)) SafeExit(105);
    if (PQntuples(result) == 0) ReunpackSwitch=1;
    PQclear(result);
  }

  /*** process files from command line ***/
  for( ; optind<argc; optind++)
  {
    CksumFile *CF=NULL;
    Cksum *Sum;
    int i;
    if (Fname) { free(Fname); Fname=NULL; }
    if (ListOutName != NULL)
    {
      fprintf(ListOutFile,"<source source=\"%s\" ",argv[optind]);
      if (UseRepository && !fo_RepExist(REP_FILES,argv[optind]))
      {
        /* make sure the source exists in the src repository */
        if (fo_RepImport(argv[optind],REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",argv[optind],argv[optind])
          SafeExit(106);
        }
      }
    }

    if (UseRepository)
    {
      if (fo_RepExist(REP_FILES,argv[optind]))
      {
        Fname=fo_RepMkPath(REP_FILES,argv[optind]);
      }
      else if (fo_RepExist(REP_GOLD,argv[optind]))
      {
        Fname=fo_RepMkPath(REP_GOLD,argv[optind]);
        if (fo_RepImport(Fname,REP_FILES,argv[optind],1) != 0)
        {
          LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,argv[optind])
          SafeExit(107);
        }
      }

      if (Fname)
      {
        FnameCheck = Fname;
        CF = SumOpenFile(Fname);
      }
      else
      {
        LOG_ERROR("NO file unpacked.  File %s does not exist either in GOLD or FILES", Pfile);
        SafeExit(108);
      }
      /* else: Fname is NULL and CF is NULL */
    }
    else 
    {
      FnameCheck = argv[optind];
      CF = SumOpenFile(argv[optind]);
    }

    /* Check file to unpack.  Does it exist?  Is it zero length? */
    if (stat(FnameCheck,&Stat)) 
    {
      LOG_ERROR("File to unpack is unavailable: %s, error: %s", Fname, strerror(errno));
      SafeExit(109);
    }
    else
    if (Stat.st_size < 1)
    {
      LOG_WARNING("File to unpack is empty: %s", Fname);
      SafeExit(110);
    }

    if (ListOutFile)
    {
      if (CF)
      {
        Sum = SumComputeBuff(CF);
        SumCloseFile(CF);
        if (Sum)
        {
          fputs("fuid=\"",ListOutFile);
          for(i=0; i<20; i++)
          { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
          fputs(".",ListOutFile);
          for(i=0; i<16; i++)
          { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
          fputs(".",ListOutFile);
          fprintf(ListOutFile,"%Lu",(long long unsigned int)Sum->DataLen);
          fputs("\" ",ListOutFile);
          free(Sum);
        } /* if Sum */
      } /* if CF */
      else /* file too large to mmap (probably) */
      {
        FILE *Fin;
        Fin = fopen(argv[optind],"rb");
        if (Fin)
        {
          Sum = SumComputeFile(Fin);
          if (Sum)
          {
            fputs("fuid=\"",ListOutFile);
            for(i=0; i<20; i++)
            { fprintf(ListOutFile,"%02X",Sum->SHA1digest[i]); }
            fputs(".",ListOutFile);
            for(i=0; i<16; i++)
            { fprintf(ListOutFile,"%02X",Sum->MD5digest[i]); }
            fputs(".",ListOutFile);
            fprintf(ListOutFile,"%Lu",(long long unsigned int)Sum->DataLen);
            fputs("\" ",ListOutFile);
            free(Sum);
          }
          fclose(Fin);
        }
      } /* else no CF */
      fprintf(ListOutFile,">\n"); /* end source XML */
    }
    if (Fname)	TraverseStart(Fname,"called by main via args",NewDir,Recurse);
    else		TraverseStart(argv[optind],"called by main",NewDir,Recurse);
    if (ListOutName != NULL) fprintf(ListOutFile,"</source>\n");
  } /* end for */

  /* free memory */
  if (Fname) { free(Fname); Fname=NULL; }

  /* process pfile from scheduler */
  if (Pfile)
  {
    if (0 == (rvExist1 = fo_RepExist2(REP_FILES,Pfile)))
    {
      Fname=fo_RepMkPath(REP_FILES,Pfile);
    }
    else if (0 == (rvExist2 = fo_RepExist2(REP_GOLD,Pfile)))
    {
      Fname=fo_RepMkPath(REP_GOLD,Pfile);
      if (fo_RepImport(Fname,REP_FILES,Pfile,1) != 0)
      {
        LOG_ERROR("Failed to import '%s' as '%s' into the repository",Fname,Pfile)
        SafeExit(111);
      }
    }
    if (Fname)
    {
      TraverseStart(Fname,"called by main via env",NewDir,Recurse);
      free(Fname);
      Fname=NULL;
    }
    else
    {
      LOG_ERROR("NO file unpacked!");
      if (rvExist1 > 0)
      {
        Fname=fo_RepMkPath(REP_FILES, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist1), Fname);
      }
      if (rvExist2 > 0)
      {
        Fname=fo_RepMkPath(REP_GOLD, Pfile);
        LOG_ERROR("Error is %s for %s", strerror(rvExist2), Fname);
      }
      SafeExit(112);
    }
  }

  /* recurse on all the children */
  if (Thread > 0) do
  {
    Pid = ParentWait();
    Thread--;
    if (Pid >= 0)
    {
      if (!Queue[Pid].ChildEnd)
      {
        /* copy over data */
        if (Recurse > 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse-1,&Queue[Pid].PI);
        else if (Recurse < 0)
          Traverse(Queue[Pid].ChildRecurse,NULL,"called by wait",NULL,Recurse,&Queue[Pid].PI);
      }
    }
  } while(Pid >= 0);

  if (MagicCookie) magic_close(MagicCookie);
  if (ListOutFile)
  {
    fprintf(ListOutFile,"<summary files_regular=\"%d\" files_compressed=\"%d\" artifacts=\"%d\" directories=\"%d\" containers=\"%d\" />\n",
        TotalFiles,TotalCompressedFiles,TotalArtifacts,
        TotalDirectories,TotalContainers);
    fputs("</xml>\n",ListOutFile);
  }
  if (pgConn)
  {
    /* If it completes, mark it! */
    if (Upload_Pk)
    {
      snprintf(SQL,MAXSQL,"UPDATE upload SET upload_mode = (upload_mode | (1<<5)), uploadtree_tablename='%s' WHERE upload_pk = '%s';",uploadtree_tablename, Upload_Pk);
      result =  PQexec(pgConn, SQL); /* UPDATE upload */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(113);
      PQclear(result);

      snprintf(SQL,MAXSQL,"UPDATE %s SET realparent = getItemParent(uploadtree_pk) WHERE upload_fk = '%s'",uploadtree_tablename, Upload_Pk);
      result =  PQexec(pgConn, SQL); /* UPDATE uploadtree */
      if (fo_checkPQcommand(pgConn, result, SQL, __FILE__ ,__LINE__)) SafeExit(114);
      PQclear(result);
    }

    if (ars_pk) fo_WriteARS(pgConn, ars_pk, atoi(Upload_Pk), agent_pk, AgentARSName, 0, 1);
  }
  if (ListOutFile && (ListOutFile != stdout))
  {
    fclose(ListOutFile);
  }

  if (UnlinkAll && MaxThread > 1)
  {
    /* Delete temporary files */
    if (strcmp(NewDir, ".")) RemoveDir(NewDir);
  }
 
  SafeExit(0);
  return(0);  // never executed but makes the compiler happy
} 
Example #25
0
File: main.c Project: rlintu/fossology
/**
 * \brief main function for the pkgagent
 *
 * There are 2 ways to use the pkgagent agent:
 *   1. Command Line Analysis :: test a rpm file from the command line
 *   2. Agent Based Analysis  :: run from the scheduler
 *
 * +-----------------------+
 * | Command Line Analysis |
 * +-----------------------+
 *
 * To analyze a rpm file from the command line:
 *   file :: if files are rpm package listed, display their meta data
 *   -v   :: verbose (-vv = more verbose)
 *
 *   example:
 *     $ ./pkgagent rpmfile
 *
 * +----------------------+
 * | Agent Based Analysis |
 * +----------------------+
 *
 * To run the pkgagent as an agent simply run with no command line args
 *   no file :: process data from the scheduler
 *   -i      :: initialize the database, then exit
 *
 *   example:
 *     $ upload_pk | ./pkgagent
 *
 * \param argc the number of command line arguments
 * \param argv the command line arguments
 * \return 0 on a successful program execution
 */
int	main	(int argc, char *argv[])
{
    int c;
    char *agent_desc = "Pulls metadata out of RPM or DEBIAN packages";
    //struct rpmpkginfo *glb_rpmpi;
    //struct debpkginfo *glb_debpi;
    int Agent_pk;
    int ars_pk = 0;

    int upload_pk = 0;           // the upload primary key
    int user_pk = 0;             // the user primary key
    char *AgentARSName = "pkgagent_ars";
    int rv;
    PGresult *ars_result;
    char sqlbuf[1024];
    char *COMMIT_HASH;
    char *VERSION;
    char agent_rev[MAXCMD];
    int CmdlineFlag = 0; /* run from command line flag, 1 yes, 0 not */

    fo_scheduler_connect(&argc, argv, &db_conn);

    //glb_rpmpi = (struct rpmpkginfo *)malloc(sizeof(struct rpmpkginfo));
    //glb_debpi = (struct debpkginfo *)malloc(sizeof(struct debpkginfo));

    COMMIT_HASH = fo_sysconfig("pkgagent", "COMMIT_HASH");
    VERSION = fo_sysconfig("pkgagent", "VERSION");
    sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
    Agent_pk = fo_GetAgentKey(db_conn, basename(argv[0]), 0, agent_rev, agent_desc);

    /* Process command-line */
    while((c = getopt(argc,argv,"ic:CvVh")) != -1)
    {
        switch(c)
        {
        case 'i':
            PQfinish(db_conn);  /* DB was opened above, now close it and exit */
            exit(0);
        case 'v':
            Verbose++;
            break;
        case 'c':
            break; /* handled by fo_scheduler_connect() */
        case 'C':
            CmdlineFlag = 1;
            break;
        case 'V':
            printf("%s", BuildVersion);
            PQfinish(db_conn);
            return(0);
        default:
            Usage(argv[0]);
            PQfinish(db_conn);
            exit(-1);
        }
    }
    /* If no args, run from scheduler! */
    if (CmdlineFlag == 0)
    {
        user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

        while(fo_scheduler_next())
        {
            upload_pk = atoi(fo_scheduler_current());

            /* Check Permissions */
            if (GetUploadPerm(db_conn, upload_pk, user_pk) < PERM_WRITE)
            {
                LOG_ERROR("You have no update permissions on upload %d", upload_pk);
                continue;
            }

            if (Verbose) {
                printf("PKG: pkgagent read %d\n", upload_pk);
            }
            if (upload_pk ==0) continue;

            /* If the pkgagent ars table exists, check for a duplicate request;
             * if it does not exist yet, skip the duplicate check.
             */
            rv = fo_tableExists(db_conn, AgentARSName);
            if (rv)
            {
                /* check ars table to see if this is duplicate request*/
                snprintf(sqlbuf, sizeof(sqlbuf),
                         "select ars_pk from pkgagent_ars,agent \
          where agent_pk=agent_fk and ars_success=true \
          and upload_fk='%d' and agent_fk='%d'",
                         upload_pk, Agent_pk);
                ars_result = PQexec(db_conn, sqlbuf);
                if (fo_checkPQresult(db_conn, ars_result, sqlbuf, __FILE__, __LINE__)) exit(-1);
                if (PQntuples(ars_result) > 0)
                {
                    PQclear(ars_result);
                    LOG_WARNING("Ignoring requested pkgagent analysis of upload %d - Results are already in database.\n",upload_pk);
                    continue;
                }
                PQclear(ars_result);
            }
            /* Record analysis start in pkgagent_ars, the pkgagent audit trail. */
            ars_pk = fo_WriteARS(db_conn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 0);

            /* process the upload_pk pkgagent */
            if(ProcessUpload(upload_pk) != 0) return -1;

            /* Record analysis success in pkgagent_ars. */
            if (ars_pk) fo_WriteARS(db_conn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 1);
        }
    }
    else
    {
        if (Verbose) {
            printf("DEBUG: running in cli mode, processing file(s)\n");
        }
        for (; optind < argc; optind++)
        {
            struct rpmpkginfo *rpmpi;
            rpmpi = (struct rpmpkginfo *)malloc(sizeof(struct rpmpkginfo));
            rpmReadConfigFiles(NULL, NULL);
            //if(ProcessUpload(atoi(argv[optind])) == 0)
            if(GetMetadata(argv[optind],rpmpi) != -1)
                printf("OK\n");
            else
                printf("Fail\n");
#ifdef _RPM_4_4_COMPAT
            rpmFreeCrypto();
            int i;
            for(i=0; i< rpmpi->req_size; i++)
                free(rpmpi->requires[i]);
#endif /* After RPM4.4 version*/
            free(rpmpi->requires);
            free(rpmpi);
            rpmFreeMacros(NULL);
        }
    }

    PQfinish(db_conn);
    fo_scheduler_disconnect(0);
    return(0);
} /* main() */
Example #26
0
/*********************************************************
  \brief Scan an Upload for a regex - regular expression.
  \n gets a list of files in an upload and calls regexScan()

  \param uploadNum string containing upload_pk value
  \param regexStr string containing the regex
  \return i = number of files scanned, 0 = error
**********************************************************/
int regexScanUpload(char *uploadNum, char *regexStr)
{
  char sqlSelect[256];
  PGresult *result, *pfileResult;

  int fileCount, i, retCode;

  char fileRealName[1000];
  char fileRepoName[1000];

  FILE *scanFilePtr;

  regex_t regex;

  /* Ensure uploadNum is "valid" then obtain a list of pfile entries and scan them */
  sprintf(sqlSelect, "SELECT upload_pk, upload_mode, upload_filename  from upload where upload_pk = '%s'", uploadNum);
  result = PQexec(pgConn, sqlSelect);

  if (fo_checkPQresult(pgConn, result, sqlSelect, __FILE__, __LINE__)) return 0;

  /* confirm a sane result set */
  if (PQntuples(result) == 0)
  {
    fprintf(stderr, "No uploads appear to be available here!\n");
    PQclear(result);
    return 0;   /* nothing found to scan */
  }

  /* Next ensure that uploadNum was successfully uploaded */
  /* We'll only look at upload_pk entries that have successfully run ununpack (64) and adj2nest (32) */
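  /* upload_mode bit check: 64 (ununpack) | 32 (adj2nest) == 96, so both bits must be set */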
  if ((atoi(PQgetvalue(result, 0, 1)) & 96) != 96)
  {
    fprintf(stderr, "Upload %s was not successfully processed after upload!\n", uploadNum);
    PQclear(result);
    return 0;   /* nothing found to scan */
  }

  /* Now get our list of required pfile entries for this upload */
  PQclear(result);   /* done with the upload row */
  sprintf(sqlSelect, "SELECT uploadtree.pfile_fk, ufile_name from uploadtree, upload where upload_fk = upload_pk and uploadtree.pfile_fk <> 0 and ufile_mode = 32768 and upload_pk = '%s'", uploadNum);
  result = PQexec(pgConn, sqlSelect);

  if (fo_checkPQresult(pgConn, result, sqlSelect, __FILE__, __LINE__)) return 0;

  fileCount = PQntuples(result);
//  fprintf(stderr, "Located %d files to process.\n", fileCount);

  /* Compile the regex for improved performance */
  retCode = regcomp(&regex, regexStr, REG_ICASE+REG_EXTENDED);
  if (retCode)
  {
    fprintf(stderr, "regex %s failed to compile\n", regexStr);
    return 1;
  }

  /* Scan the files we've found for this upload */
  for (i=0; i<fileCount; i++)
  {
    /* Attempt to locate the appropriate pFile_pk record */
    sprintf(sqlSelect, "SELECT pfile_sha1, pfile_md5, pfile_size, ufile_name FROM pfile, uploadtree WHERE pfile_fk = pfile_pk and pfile_pk = '%s'", PQgetvalue(result, i, 0));
    pfileResult = PQexec(pgConn, sqlSelect);

    if (fo_checkPQresult(pgConn, pfileResult, sqlSelect, __FILE__, __LINE__)) return 0;

    /* confirm a sane result set */
    if (PQntuples(pfileResult) == 1)
    {
      /* For each pfile value grind through the regex scan process */

      /* Locate and construct the appropriate full name from pfile table based upon pfile_pk value */
      if (pfileNumToNames(PQgetvalue(result, i, 0), fileRepoName, fileRealName) != 0)
      {
        fprintf(stderr, "ERROR: Unable to locate pfile_pk '%s'\n", PQgetvalue(result, i, 0));
        return 0;
      }

      /* Use fo_RepFread() for access. It uses fo_RepMkPath() to map name to full path. */
      scanFilePtr = fo_RepFread("files", fileRepoName);
      if (!scanFilePtr)
      {
        fprintf(stderr, "ERROR: Unable to open '%s/%s'\n", "files", fileRepoName);
        return 0;
      }

      /* Call the scan function. Note that we'll need to "Humanize" the fileName at some point. */
      regexScan(&regex, regexStr, scanFilePtr, fileRealName);
      fo_RepFclose(scanFilePtr);   /* close the repository file handle when done */
    }
    else
    {
      fprintf(stderr, "WARNING: File: %s - Located %d instances of pfile_pk %s ! Size = %s bytes!\n", PQgetvalue(result, i, 1), PQntuples(pfileResult), PQgetvalue(result, i, 0), PQgetvalue(pfileResult, i, 2));
    }
    PQclear(pfileResult);   /* release the per-file result before the next iteration */
  }
  regfree(&regex);
  PQclear(result);

  /* return the number of scanned files */
  return i;
}
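regexScan() is called above but not shown in this excerpt. Below is a minimal sketch, assuming <stdio.h> and <regex.h>, of what such a helper could look like given the call site (a pre-compiled regex, the original pattern string for reporting, an already-open repository FILE pointer, and a human-readable file name); the actual FOSSology implementation may differ.

/**
 * \brief Hypothetical sketch: scan an open file line by line for a pre-compiled regex.
 * \return the number of matching lines
 */
int regexScan(regex_t *regex, char *regexStr, FILE *scanFilePtr, char *fileName)
{
  char line[4096];
  long lineNum = 0;
  int matchCount = 0;

  while (fgets(line, sizeof(line), scanFilePtr) != NULL)
  {
    lineNum++;
    if (regexec(regex, line, 0, NULL, 0) == 0)   /* 0 means the line matched */
    {
      matchCount++;
      printf("%s: line %ld matches /%s/\n", fileName, lineNum, regexStr);
    }
  }
  return matchCount;
}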
Example #27
0
/**
 * \brief Test pkgagent.c function RecordMetadataDEB()
 * \test
 * -# Create a test upload in pfile table
 * -# Create debpkginfo object and populate it
 * -# Call RecordMetadataDEB()
 * -# Check if meta data got inserted in DB
 */
void test_RecordMetadataDEB()
{
  struct debpkginfo *pi;
  int data_size, i, j;
  char SQL[MAXSQL];
  PGresult *result;
  char Fuid[1024];
  //char *DBConfFile = NULL;  /* use default Db.conf */
  char *ErrorBuf;

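  /* Build a synthetic repository key in Fuid: 40 uppercase hex chars (SHA1), '.',
   * 32 uppercase hex chars (MD5), '.', decimal file size.
   */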
  for(i=0; i<20; i++) { sprintf(Fuid+0+i*2,"%02X",'s'); }
  Fuid[40]='.';
  for(i=0; i<16; i++) { sprintf(Fuid+41+i*2,"%02X",'m'); }
  Fuid[73]='.';
  snprintf(Fuid+74,sizeof(Fuid)-74,"%Lu",(long long unsigned int)100);

  pi = (struct debpkginfo *)malloc(sizeof(struct debpkginfo));
  memset(pi, 0, sizeof(struct debpkginfo));
  int predictValue = 0;

  /* prepare testing data in the database */
  db_conn = fo_dbconnect(DBConfFile, &ErrorBuf);
  snprintf(SQL,MAXSQL,"INSERT INTO pfile (pfile_sha1,pfile_md5,pfile_size) VALUES ('%.40s','%.32s','%s');",
          Fuid,Fuid+41,Fuid+74);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
  {
    printf("Perpare pfile information ERROR!\n");
    free(pi);
    exit(-1);
  }
  PQclear(result);
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"SELECT pfile_pk FROM pfile WHERE pfile_sha1 = '%.40s' AND pfile_md5 = '%.32s' AND pfile_size = '%s';",
        Fuid,Fuid+41,Fuid+74);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQresult(db_conn, result, SQL, __FILE__, __LINE__))
  {
    printf("Get pfile information ERROR!\n");
    exit(-1);
  }
  pi->pFileFk = atoi(PQgetvalue(result, 0, 0));
  PQclear(result);
  strncpy(pi->pkgName, "Test Pkg", sizeof(pi->pkgName));
  strncpy(pi->pkgArch, "Test Arch", sizeof(pi->pkgArch));
  strncpy(pi->version, "Test version", sizeof(pi->version));
  strncpy(pi->maintainer, "Test maintainer", sizeof(pi->maintainer));
  strncpy(pi->description, "Test description", sizeof(pi->description));
  strncpy(pi->section, "Test section", sizeof(pi->section));
  strncpy(pi->priority, "Test priority", sizeof(pi->priority));
  strncpy(pi->homepage, "Test homepage", sizeof(pi->homepage));
  strncpy(pi->source, "Test source", sizeof(pi->source));
  strncpy(pi->summary, "Test summary", sizeof(pi->summary));
  strncpy(pi->format, "Test format", sizeof(pi->format));
  strncpy(pi->uploaders, "Test uploaders", sizeof(pi->uploaders));
  strncpy(pi->standardsVersion, "Test standard", sizeof(pi->standardsVersion));
  pi->installedSize = 0;

  data_size = 2;
  pi->depends = calloc(data_size, sizeof(char *));
  for (j=0; j<data_size;j++){
    pi->depends[j] = malloc(MAXCMD);
    strcpy(pi->depends[j],"Test depends");
  }
  pi->dep_size = data_size;

  /* Test the RecordMetadataDEB function */
  int Result = RecordMetadataDEB(pi);
  printf("RecordMetadataDEB Result is:%d\n", Result);

  /* Check that the inserted data is correct */
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"SELECT pkg_pk, pkg_name, pkg_arch, version, maintainer, description FROM pkg_deb INNER JOIN pfile ON pfile_fk = '%ld' AND pfile_fk = pfile_pk;", pi->pFileFk);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQresult(db_conn, result, SQL, __FILE__, __LINE__))
  {
    printf("Get pkg information ERROR!\n");
    PQclear(result);
    free(pi);
    exit(-1);
  }
  CU_ASSERT_STRING_EQUAL(PQgetvalue(result, 0, 1), "Test Pkg");
  CU_ASSERT_STRING_EQUAL(PQgetvalue(result, 0, 2), "Test Arch");
  CU_ASSERT_STRING_EQUAL(PQgetvalue(result, 0, 3), "Test version");
  CU_ASSERT_STRING_EQUAL(PQgetvalue(result, 0, 4), "Test maintainer");
  CU_ASSERT_STRING_EQUAL(PQgetvalue(result, 0, 5), "Test description");
  PQclear(result);


  /* Clear testing data in database */
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"DELETE FROM pkg_deb_req WHERE pkg_fk IN (SELECT pkg_pk FROM pkg_deb WHERE pfile_fk = '%ld');", pi->pFileFk);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
  {
    printf("Clear pkg_deb_req test data ERROR!\n");
    PQclear(result);
    free(pi);
    exit(-1);
  }
  PQclear(result);
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"DELETE FROM pkg_deb WHERE pfile_fk = '%ld';", pi->pFileFk);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
  {
    printf("Clear pkg_deb test data ERROR!\n");
    PQclear(result);
    free(pi);
    exit(-1);
  }
  PQclear(result);
  memset(SQL,'\0',MAXSQL);
  snprintf(SQL,MAXSQL,"DELETE FROM pfile WHERE pfile_pk = '%ld'", pi->pFileFk);
  result =  PQexec(db_conn, SQL);
  if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
  {
    printf("Clear pfile test data ERROR!\n");
    PQclear(result);
    free(pi);
    exit(-1);
  }
  PQclear(result);

  PQfinish(db_conn);
  int k;
  for(k=0; k< pi->dep_size;k++)
    free(pi->depends[k]);
  free(pi->depends);
  memset(pi,0,sizeof(struct debpkginfo));
  free(pi);
  CU_ASSERT_EQUAL(Result, predictValue);
}
/**
 * \brief Prepare database
 *
 * \param db_conn the database connection
 * \param pi pointer to the debpkginfo struct to populate
 *
 * \return upload_pk on OK, -1 on failure
 */
long prepare_Database(PGconn *db_conn, struct debpkginfo *pi)
{
    long upload_pk;
    char SQL[MAXSQL];
    PGresult *result;
    long control_pfilepk;

    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"BEGIN;");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);

    /* insert mimetype */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO mimetype (mimetype_name) VALUES ('application/x-rpm');");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare mimetype information ERROR!\n");
        return (-1);
    }
    PQclear(result);
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO mimetype (mimetype_name) VALUES ('application/x-debian-package');");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare mimetype information ERROR!\n");
        return (-1);
    }
    PQclear(result);
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO mimetype (mimetype_name) VALUES ('application/x-debian-source');");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare mimetype information ERROR!\n");
        return (-1);
    }
    PQclear(result);

    /* insert pfile: fossology-web_1.4.1_all.deb */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO pfile (pfile_sha1,pfile_md5,pfile_size) VALUES ('%.40s','%.32s','%s');",
             "AF1DF2C4B32E4115DB5F272D9EFD0E674CF2A0BC","2239AA7DAC291B6F8D0A56396B1B8530","4560");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);
    /* insert pfile: control */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO pfile (pfile_sha1,pfile_md5,pfile_size) VALUES ('%.40s','%.32s','%s');",
             "F1D2319DF20ABC4CEB02CA5A3C2021BD87B26810","87972FC55E2CDD2609ED85051BE50BAF","722");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);

    /* select pfile_pk: fossology-web_1.4.1_all.deb */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"SELECT pfile_pk FROM pfile WHERE pfile_sha1 = '%.40s' AND pfile_md5 = '%.32s' AND pfile_size = '%s';",
             "AF1DF2C4B32E4115DB5F272D9EFD0E674CF2A0BC","2239AA7DAC291B6F8D0A56396B1B8530","4560");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQresult(db_conn, result, SQL, __FILE__, __LINE__))
    {
        printf("Get pfile information ERROR!\n");
        return (-1);
    }
    pi->pFileFk = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    /* select pfile_pk: control */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"SELECT pfile_pk FROM pfile WHERE pfile_sha1 = '%.40s' AND pfile_md5 = '%.32s' AND pfile_size = '%s';",
             "F1D2319DF20ABC4CEB02CA5A3C2021BD87B26810","87972FC55E2CDD2609ED85051BE50BAF","722");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQresult(db_conn, result, SQL, __FILE__, __LINE__))
    {
        printf("Get pfile information ERROR!\n");
        return (-1);
    }
    control_pfilepk = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    /* insert upload: fossology-web_1.4.1_all.deb */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO upload (upload_filename,upload_mode,upload_ts,pfile_fk) VALUES ('%s',40,now(),%ld);",
             "fossology-web_1.4.1_all.deb", pi->pFileFk);
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        exit(-1);
    }
    PQclear(result);
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"SELECT upload_pk FROM upload WHERE pfile_fk = '%ld';",
             pi->pFileFk);
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQresult(db_conn, result, SQL, __FILE__, __LINE__))
    {
        printf("Get pfile information ERROR!\n");
        return (-1);
    }
    upload_pk = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    /* insert uploadtree: fossology-web_1.4.1_all.deb */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO uploadtree (upload_fk,pfile_fk,lft,rgt,ufile_name) VALUES (%ld,%ld,1,48,'fossology-web_1.4.1_all.deb');",
             upload_pk, pi->pFileFk);
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);
    /* insert uploadtree: control */
    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"INSERT INTO uploadtree (upload_fk,pfile_fk,lft,rgt,ufile_name) VALUES (%ld,%ld,9,10,'control');",
             upload_pk, control_pfilepk);
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);

    memset(SQL,'\0',MAXSQL);
    snprintf(SQL,MAXSQL,"COMMIT;");
    result =  PQexec(db_conn, SQL);
    if (fo_checkPQcommand(db_conn, result, SQL, __FILE__ ,__LINE__))
    {
        printf("Perpare pfile information ERROR!\n");
        return (-1);
    }
    PQclear(result);

    return upload_pk;
}
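A minimal sketch of how a CUnit-style test could drive prepare_Database(); the db_conn and DBConfFile globals are assumed to be the same ones used by the tests above, and the function under test is left as a placeholder rather than naming a specific pkgagent API. Illustrative only.

void test_with_prepared_upload()
{
  struct debpkginfo *pi;
  char *ErrorBuf;
  long upload_pk;

  pi = (struct debpkginfo *)calloc(1, sizeof(struct debpkginfo));
  db_conn = fo_dbconnect(DBConfFile, &ErrorBuf);

  /* seed pfile/upload/uploadtree rows and get the new upload_pk */
  upload_pk = prepare_Database(db_conn, pi);
  CU_ASSERT(upload_pk > 0);

  /* ... call the pkgagent function under test here, using pi->pFileFk and upload_pk ... */

  PQfinish(db_conn);
  free(pi);
}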
Example #29
0
int main(int argc, char **argv)
{
  char *agentDesc = "Bucket agent";
  int cmdopt;
  int verbose = 0;
  int ReadFromStdin = 1;
  int head_uploadtree_pk = 0;
  PGconn *pgConn;
  PGresult *topresult;
  PGresult *result;
  char sqlbuf[512];
  char *Delims = ",= \t\n\r";
  char *token, *saveptr;
  int agent_pk = 0;
  int nomos_agent_pk = 0;
  int bucketpool_pk = 0;
  int ars_pk = 0;
  int readnum = 0;
  int rv;
  int hasPrules;
  int user_pk = 0;
  char *bucketpool_name;
  char *COMMIT_HASH;
  char *VERSION;
  char *uploadtree_tablename;
  char agent_rev[myBUFSIZ];
  int rerun = 0;


//  int *bucketList;
  pbucketdef_t bucketDefArray = 0;
  pbucketdef_t tmpbucketDefArray = 0;
  cacheroot_t  cacheroot;
  uploadtree_t  uploadtree;
  memset(&uploadtree, 0, sizeof(uploadtree));   /* zero all fields so a later free(ufile_name) on the error path is safe */

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);
  user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

  /* command line options */
  while ((cmdopt = getopt(argc, argv, "rin:p:t:u:vc:hV")) != -1)
  {
    switch (cmdopt)
    {
      case 'i': /* "Initialize" */
            PQfinish(pgConn);
            exit(0);
      case 'n': /* bucketpool_name  */
            ReadFromStdin = 0;
            bucketpool_name = optarg;
            /* find the highest rev active bucketpool_pk */
            if (!bucketpool_pk)
            {
              bucketpool_pk = getBucketpool_pk(pgConn, bucketpool_name);
              if (!bucketpool_pk)
                printf("%s is not an active bucketpool name.\n", bucketpool_name);
            }
            break;
      case 'p': /* bucketpool_pk */
            ReadFromStdin = 0;
            bucketpool_pk = atoi(optarg);
            /* validate bucketpool_pk */
            sprintf(sqlbuf, "select bucketpool_pk from bucketpool where bucketpool_pk=%d and active='Y'", bucketpool_pk);
            bucketpool_pk = validate_pk(pgConn, sqlbuf);
            if (!bucketpool_pk)
              printf("%d is not an active bucketpool_pk.\n", atoi(optarg));
            break;
      case 't': /* uploadtree_pk */
            ReadFromStdin = 0;
            if (uploadtree.upload_fk) break;
            head_uploadtree_pk = atoi(optarg);
            /* validate uploadtree_pk */
            sprintf(sqlbuf, "select uploadtree_pk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
            head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
            if (!head_uploadtree_pk)
              printf("%d is not an active uploadtree_pk.\n", atoi(optarg));
            break;
      case 'u': /* upload_pk */
            ReadFromStdin = 0;
            if (!head_uploadtree_pk)
            {
              uploadtree.upload_fk = atoi(optarg);
              /* validate upload_pk  and get uploadtree_pk  */
              sprintf(sqlbuf, "select upload_pk from upload where upload_pk=%d", uploadtree.upload_fk);
              uploadtree.upload_fk = validate_pk(pgConn, sqlbuf);
              if (!uploadtree.upload_fk)
                printf("%d is not an valid upload_pk.\n", atoi(optarg));
              else
              {
                sprintf(sqlbuf, "select uploadtree_pk from uploadtree where upload_fk=%d and parent is null", uploadtree.upload_fk);
                head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
              }
            }
            break;
      case 'v': /* verbose output for debugging  */
            verbose++;
            break;
      case 'c': break; /* handled by fo_scheduler_connect() */
      case 'r':
            rerun = 1; /** rerun bucket */
            break;
      case 'V': /* print version info */
            printf("%s", BuildVersion);
            PQfinish(pgConn);
            exit(0);
      default:
            Usage(argv[0]);
            PQfinish(pgConn);
            exit(-1);
    }
  }
  debug = verbose;

  /*** validate command line ***/
  if (!bucketpool_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify an active bucketpool.\n");
    Usage(argv[0]);
    exit(-1);
  }
  if (!head_uploadtree_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify a valid uploadtree_pk or upload_pk.\n");
    Usage(argv[0]);
    exit(-1);
  }

  /* get agent pk
   * Note, if GetAgentKey fails, this process will exit.
   */
  COMMIT_HASH = fo_sysconfig("buckets", "COMMIT_HASH");
  VERSION = fo_sysconfig("buckets", "VERSION");
  sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
  agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), uploadtree.upload_fk, agent_rev, agentDesc);

  /*** Initialize the license_ref table cache ***/
  /* Build the license ref cache to hold 2<<11 (4096) licenses.
     This MUST be a power of 2.
   */
  cacheroot.maxnodes = 2<<11;
  cacheroot.nodes = calloc(cacheroot.maxnodes, sizeof(cachenode_t));
  if (!lrcache_init(pgConn, &cacheroot))
  {
    printf("FATAL: Bucket agent could not allocate license_ref table cache.\n");
    exit(1);
  }


  /* main processing loop */
  while(++readnum)
  {
    uploadtree.upload_fk = 0;
    if (ReadFromStdin)
    {
      bucketpool_pk = 0;

      /* Read the bucketpool_pk and upload_pk from stdin.
       * Format looks like 'bppk=123, upk=987'
       */
      if (!fo_scheduler_next()) break;

      token = strtok_r(fo_scheduler_current(), Delims, &saveptr);
      while (token && (!uploadtree.upload_fk || !bucketpool_pk))
      {
        if (strcmp(token, "bppk") == 0)
        {
          bucketpool_pk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        else
        if (strcmp(token, "upk") == 0)
        {
          uploadtree.upload_fk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        token = strtok_r(NULL, Delims, &saveptr);
      }

      /* Check Permissions */
      if (GetUploadPerm(pgConn, uploadtree.upload_fk, user_pk) < PERM_WRITE)
      {
        LOG_ERROR("You have no update permissions on upload %d", uploadtree.upload_fk);
        continue;
      }

      /* From the upload_pk, get the head of the uploadtree, pfile_pk and ufile_name  */
      sprintf(sqlbuf, "select uploadtree_pk, pfile_fk, ufile_name, ufile_mode,lft,rgt from uploadtree \
             where upload_fk='%d' and parent is null limit 1", uploadtree.upload_fk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) return -1;
      if (PQntuples(topresult) == 0)
      {
        printf("ERROR: %s.%s missing upload_pk %d.\nsql: %s",
               __FILE__, agentDesc, uploadtree.upload_fk, sqlbuf);
        PQclear(topresult);
        continue;
      }
      head_uploadtree_pk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 1));
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 2));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    } /* end ReadFromStdin */
    else
    {
      /* Only one input to process if from command line, so terminate if it's been done */
      if (readnum > 1) break;

      /* not reading from stdin
       * Get the pfile, and ufile_name for head_uploadtree_pk
       */
      sprintf(sqlbuf, "select pfile_fk, ufile_name, ufile_mode,lft,rgt, upload_fk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__))
      {
        free(uploadtree.ufile_name);
        return -1;
      }
      if (PQntuples(topresult) == 0)
      {
        printf("FATAL: %s.%s missing root uploadtree_pk %d\n",
               __FILE__, agentDesc, head_uploadtree_pk);
        PQclear(topresult);
        continue;
      }
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 1));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 2));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.upload_fk = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    }

    /* Find the most recent nomos data for this upload.  That's what we want to use
         to process the buckets.
     */
    nomos_agent_pk = LatestNomosAgent(pgConn, uploadtree.upload_fk);
    if (nomos_agent_pk == 0)
    {
      printf("WARNING: Bucket agent called on treeitem (%d), but the latest nomos agent hasn't created any license data for this tree.\n",
            head_uploadtree_pk);
      continue;
    }

    /* at this point we know:
     * bucketpool_pk, bucket agent_pk, nomos agent_pk, upload_pk,
     * pfile_pk, and head_uploadtree_pk (the uploadtree_pk of the head tree to scan)
     */

    /* Has the upload already been processed?  If so, we are done.
       Don't even bother to create a bucket_ars entry.
     */
    switch (UploadProcessed(pgConn, agent_pk, nomos_agent_pk, uploadtree.pfile_fk, head_uploadtree_pk, uploadtree.upload_fk, bucketpool_pk))
    {
      case 1:  /* upload has already been processed */
        if (1 == rerun) break;
        printf("LOG: Duplicate request for bucket agent to process upload_pk: %d, uploadtree_pk: %d, bucketpool_pk: %d, bucket agent_pk: %d, nomos agent_pk: %d, pfile_pk: %d ignored.\n",
             uploadtree.upload_fk, head_uploadtree_pk, bucketpool_pk, agent_pk, nomos_agent_pk, uploadtree.pfile_fk);
        continue;
      case -1: /* SQL error, UploadProcessed() wrote error message */
        continue;
      case 0:  /* upload has not been processed */
        break;
    }

    /*** Initialize the Bucket Definition List bucketDefArray  ***/
    bucketDefArray = initBuckets(pgConn, bucketpool_pk, &cacheroot);
    if (bucketDefArray == 0)
    {
      printf("FATAL: %s.%d Bucket definition for pool %d could not be initialized.\n",
             __FILE__, __LINE__, bucketpool_pk);
      exit(-2);
    }
    bucketDefArray->nomos_agent_pk = nomos_agent_pk;
    bucketDefArray->bucket_agent_pk = agent_pk;

    /* Find the correct uploadtree table name */
    uploadtree_tablename = GetUploadtreeTableName(pgConn, uploadtree.upload_fk);
    if (!(uploadtree_tablename))
    {
      LOG_FATAL("buckets passed invalid upload, upload_pk = %d", uploadtree.upload_fk);
      return(-110);
    }

    /* set uploadtree_tablename in all the bucket definition structs */
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
    {
      tmpbucketDefArray->uploadtree_tablename = uploadtree_tablename;
    }

    /* loop through rules (bucket defs) to see if there are any package only rules */
    hasPrules = 0;
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
      if (tmpbucketDefArray->applies_to == 'p')
      {
        hasPrules = 1;
        break;
      }

    /*** END initializing bucketDefArray  ***/

    /*** Initialize DEB_SOURCE and DEB_BINARY  ***/
    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-package'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-package mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_BINARY = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-source'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-source mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_SOURCE = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);
    /*** END Initialize DEB_SOURCE and DEB_BINARY  ***/

    /*** Record analysis start in bucket_ars, the bucket audit trail. ***/
    if (0 == rerun) { // do not have any bucket scan on this upload
      snprintf(sqlbuf, sizeof(sqlbuf),
          "insert into bucket_ars (agent_fk, upload_fk, ars_success, nomosagent_fk, bucketpool_fk) values(%d,%d,'%s',%d,%d)",
          agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);

      /* retrieve the ars_pk of the newly inserted record */
      sprintf(sqlbuf, "select ars_pk from bucket_ars where agent_fk='%d' and upload_fk='%d' and ars_success='%s' and nomosagent_fk='%d' \
          and bucketpool_fk='%d' and ars_endtime is null \
          order by ars_starttime desc limit 1",
          agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
      if (PQntuples(result) == 0)
      {
        printf("FATAL: (%s.%d) Missing bucket_ars record.\n%s\n",__FILE__,__LINE__,sqlbuf);
        return -1;
      }
      ars_pk = atol(PQgetvalue(result, 0, 0));
      PQclear(result);
    }
    /*** END bucket_ars insert  ***/

    if (debug) printf("%s sql: %s\n",__FILE__, sqlbuf);

    /* process the tree for buckets
       Do this as a single transaction, therefore this agent must be
       run as a single thread.  This will prevent the scheduler from
       consuming excess time (this is a fast agent), and allow this
       process to update bucket_ars.
     */
    rv = walkTree(pgConn, bucketDefArray, agent_pk, head_uploadtree_pk, 0,
             hasPrules);
    /* if no errors and top level is a container, process the container */
    if ((!rv) && (IsContainer(uploadtree.ufile_mode)))
    {
      rv = processFile(pgConn, bucketDefArray, &uploadtree, agent_pk, hasPrules);
    }

    /* Record analysis end in bucket_ars, the bucket audit trail. */
    if (0 == rerun && ars_pk)
    {
      if (rv)
        snprintf(sqlbuf, sizeof(sqlbuf),
                "update bucket_ars set ars_endtime=now(), ars_success=false where ars_pk='%d'",
                ars_pk);
      else
        snprintf(sqlbuf, sizeof(sqlbuf),
                "update bucket_ars set ars_endtime=now(), ars_success=true where ars_pk='%d'",
                ars_pk);

      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);
    }
  }  /* end of main processing loop */
Example #30
0
/**
 * \brief Get the mimetype for a package
 * \param argc the number of command line arguments
 * \param argv the command line arguments
 * \return 0 on a successful program execution
 */
int main(int argc, char *argv[])
{
  int arg;
  char *Parm = NULL;
  char *Path = NULL;
  int c;
  char *agent_desc = "Determines mimetype for each file";
  int pfile_count = 0;
  int Agent_pk;
  int ars_pk = 0;

  int upload_pk = 0;           // the upload primary key
  int user_pk = 0;
  char *AgentARSName = "mimetype_ars";
  int rv;
  PGresult *result;
  char sqlbuf[1024];
  int CmdlineFlag = 0;        ///< run-from-command-line flag: 1 = yes, 0 = no
  char *COMMIT_HASH;
  char *VERSION;
  char agent_rev[MAXCMD];

  /* initialize the scheduler connection */
  fo_scheduler_connect(&argc, argv, &pgConn);

  /* Process command-line */
  while((c = getopt(argc,argv,"iCc:hvV")) != -1)
  {
    switch(c)
    {
      case 'i':
        PQfinish(pgConn);
        return(0);
      case 'c':
        /* handled by fo_scheduler_connect() */
        break;
      case 'C':
        CmdlineFlag = 1;
        break;
      case 'v':
        agent_verbose++;
        break;
      case 'V':
        printf("%s", BuildVersion);
        PQfinish(pgConn);
        return(0);
      default:
        Usage(argv[0]);
        PQfinish(pgConn);
        exit(-1);
    }
  }

  COMMIT_HASH = fo_sysconfig("mimetype", "COMMIT_HASH");
  VERSION = fo_sysconfig("mimetype", "VERSION");
  sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
  /* Get the Agent Key from the DB */
  Agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), 0, agent_rev, agent_desc);

  FMimetype = fopen("/etc/mime.types","rb");
  if (!FMimetype)
  {
    LOG_WARNING("Unable to open /etc/mime.types\n");
  }

  MagicCookie = magic_open(MAGIC_PRESERVE_ATIME|MAGIC_MIME);
  if (MagicCookie == NULL)
  {
    LOG_FATAL("Failed to initialize magic cookie\n");
    PQfinish(pgConn);
    exit(-1);
  }
  if (magic_load(MagicCookie,NULL) != 0)
  {
    LOG_FATAL("Failed to load magic file: UnMagic\n");
    PQfinish(pgConn);
    exit(-1);
  }

  /* Run from the command-line (for testing) */
  for(arg=optind; arg < argc; arg++)
  {
    Akey = -1;
    memset(A,'\0',sizeof(A));
    strncpy(A,argv[arg],sizeof(A)-1);
    DBCheckMime(A);
  }

  /* Run from scheduler! */
  if (0 == CmdlineFlag)
  {
    user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

    while(fo_scheduler_next())
    {
      /* get piece of information, including upload_pk, others */
      Parm = fo_scheduler_current();
      if (Parm && Parm[0])
      {
        upload_pk = atoi(Parm);

        /* Check Permissions */
        if (GetUploadPerm(pgConn, upload_pk, user_pk) < PERM_WRITE)
        {
          LOG_ERROR("You have no update permissions on upload %d", upload_pk);
          continue;
        }

        /* does ars table exist?
         * If not, create it.
         */
        rv = fo_tableExists(pgConn, AgentARSName);
        if (!rv)
        {
          rv = fo_CreateARSTable(pgConn, AgentARSName);
          if (!rv) return(0);
        }

        /* check ars table if this is duplicate request*/
        memset(sqlbuf, 0, sizeof(sqlbuf));
        snprintf(sqlbuf, sizeof(sqlbuf),
            "select ars_pk from mimetype_ars,agent \
            where agent_pk=agent_fk and ars_success=true \
            and upload_fk='%d' and agent_fk='%d'",
            upload_pk, Agent_pk);
        result = PQexec(pgConn, sqlbuf);
        if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) exit(-1);
        if (PQntuples(result) > 0)
        {
          PQclear(result);
          LOG_WARNING("Ignoring requested mimetype analysis of upload %d - Results are already in database.\n",upload_pk);
          continue;
        }
        PQclear(result);

        /* Record analysis start in mimetype_ars, the mimetype audit trail. */
        ars_pk = fo_WriteARS(pgConn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 0);

        /* Get all pfile ids on the upload that do not yet have a mimetype.
         * The "A" value is the repository key: pfile_sha1.pfile_md5.pfile_size */
        memset(sqlbuf, 0, sizeof(sqlbuf));
        snprintf(sqlbuf, sizeof(sqlbuf), "SELECT DISTINCT(pfile_pk) as Akey, pfile_sha1 || '.' || pfile_md5 || '.' || pfile_size AS A FROM uploadtree, pfile WHERE uploadtree.pfile_fk = pfile.pfile_pk AND pfile_mimetypefk is NULL AND upload_fk = '%d';", upload_pk);
        result = PQexec(pgConn, sqlbuf);
        if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) exit(-1);
        pfile_count = PQntuples(result);
        int i;
        for(i=0; i < pfile_count; i++)
        {
          Akey = atoi(PQgetvalue(result, i, 0));
          memset(A, '\0', sizeof(A));
          strncpy(A, PQgetvalue(result, i, 1), sizeof(A)-1);
          if (Akey <= 0 || A[0]=='\0')
          {
            printf("ERROR: Data is in an unknown format.\n");
            PQfinish(pgConn);
            exit(-1);
          }

          /* Process the repository file */
          /* Find the path */
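          /* fo_RepMkPath() maps the sha1.md5.size key in A to its full path under the
           * "files" repository; fo_RepExist() confirms the file is actually present.
           */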
          Path = fo_RepMkPath("files",A);
          if (Path && fo_RepExist("files",A))
          {
            /* Get the mimetype! */
            DBCheckMime(Path);
          }
          else
          {
            printf("ERROR pfile %d Unable to process.\n",Akey);
            printf("LOG pfile %d File '%s' not found.\n",Akey,A);
            PQfinish(pgConn);
            exit(-1);
          }
          /* Clean up Path memory */
          if(Path)
          {
            free(Path);
            Path = NULL;
          }
          fo_scheduler_heart(1);
        }
        PQclear(result);

        /* Record analysis success in mimetype_ars. */
        if (ars_pk) fo_WriteARS(pgConn, ars_pk, upload_pk, Agent_pk, AgentARSName, 0, 1);
      }
    }
  } /* if run from scheduler */

  /* Clean up */
  if (FMimetype) fclose(FMimetype);
  magic_close(MagicCookie);
  if (DBMime) PQclear(DBMime);
  if (pgConn) PQfinish(pgConn);
  /* after cleaning up agent, disconnect from the scheduler, this doesn't return */
  fo_scheduler_disconnect(0);
  return(0);
} /* main() */