Example #1
int MyMusicTreeModel::Compare( const wxDataViewItem &item1, const wxDataViewItem &item2,
                               unsigned int column, bool ascending ) const
{
    wxASSERT(item1.IsOk() && item2.IsOk());
        // should never happen

    if (IsContainer(item1) && IsContainer(item2))
    {
        wxVariant value1, value2;
        GetValue( value1, item1, 0 );
        GetValue( value2, item2, 0 );

        wxString str1 = value1.GetString();
        wxString str2 = value2.GetString();
        int res = str1.Cmp( str2 );
        if (res) return res;

        // items must be different
        wxUIntPtr litem1 = (wxUIntPtr) item1.GetID();
        wxUIntPtr litem2 = (wxUIntPtr) item2.GetID();

        // avoid truncating a pointer-sized difference to an int
        return litem1 > litem2 ? 1 : -1;
    }

    return wxDataViewModel::Compare( item1, item2, column, ascending );
}
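Compare() above is only consulted once the model is attached to a control and at least one column is flagged as sortable. A minimal wiring sketch, assuming a frame with a wxDataViewCtrl* member named m_dataview (the member name is hypothetical; the wx calls are standard):

wxObjectDataPtr<MyMusicTreeModel> model(new MyMusicTreeModel);
m_dataview->AssociateModel(model.get());
// Column 0 is the same model column Compare() reads when both items are containers.
m_dataview->AppendTextColumn("Title", 0, wxDATAVIEW_CELL_INERT, 200,
                             wxALIGN_LEFT, wxDATAVIEW_COL_SORTABLE);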
Example #2
String Directory::MakePathName(bool addSepFlag, const char *pathNameTrail) const
{
	String pathName(_name);
	for (Directory *pDirectory = GetParent(); pDirectory != nullptr;
										pDirectory = pDirectory->GetParent()) {
		// a "boundary container" directory may have an empty name
		if (*pDirectory->GetName() != '\0' || pDirectory->IsRootContainer()) {
			String str(pDirectory->GetName());
			size_t len = str.size();
			if (len == 0 || !IsFileSeparator(str[len - 1])) {
				str += pDirectory->GetSeparator();
			}
			str += pathName;
			pathName = str;
		}
	}
	if (pathNameTrail != nullptr) {
		size_t len = pathName.size();
		if (len == 0 || !IsFileSeparator(pathName[len - 1])) {
			pathName += GetSeparator();
		}
		for (const char *p = pathNameTrail; *p != '\0'; p++) {
			char ch = IsFileSeparator(*p)? GetSeparator() : *p;
			pathName += ch;
		}
	} else if (addSepFlag && IsContainer() && !_name.empty()) {
		size_t len = pathName.size();
		if (len > 0 && !IsFileSeparator(pathName[len - 1])) {
			pathName += GetSeparator();
		}
	}
	return pathName;
}
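A hypothetical call site, assuming pDirectory points at a valid Directory node ("readme.txt" is an arbitrary trail):

String filePath = pDirectory->MakePathName(false, "readme.txt"); // parent path + separator + "readme.txt"
String dirPath  = pDirectory->MakePathName(true, nullptr);       // trailing separator added when the node is a container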
Example #3
void Item::DeleteMe()
{
    if( IsContainer() )
        TO_CONTAINER(this)->Destruct();
    else
        Destruct();
}
Example #4
void DIALOG_LIB_EDIT_PIN_TABLE::DataViewModel::Refresh()
{
#ifdef REASSOCIATE_HACK
    m_Widget->AssociateModel( this );
#else
    std::queue<wxDataViewItem> todo;
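    // An invalid (default-constructed) wxDataViewItem stands for the model's invisible root.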
    todo.push( wxDataViewItem() );

    while( !todo.empty() )
    {
        wxDataViewItem current = todo.front();
        wxDataViewItemArray items;

        GetChildren( current, items );
        ItemsAdded( current, items );

        for( wxDataViewItemArray::const_iterator i = items.begin(); i != items.end(); ++i )
        {
            if( IsContainer( *i ) )
                todo.push( *i );
        }

        todo.pop();
    }

#endif
}
Example #5
NS_IMETHODIMP
mozSanitizingHTMLSerializer::AppendElementEnd(nsIDOMElement *aElement,
                                              nsAString& aStr)
{
  NS_ENSURE_ARG(aElement);

  mContent = do_QueryInterface(aElement);
  NS_ENSURE_TRUE(mContent, NS_ERROR_FAILURE);

  mOutputString = &aStr;

  nsresult rv = NS_OK;
  PRInt32 id = GetIdForContent(mContent);

  PRBool isContainer = IsContainer(id);

  if (isContainer) {
    rv = DoCloseContainer(id);
  }

  mContent = 0;
  mOutputString = nsnull;

  return rv;
}
Example #6
NS_IMETHODIMP
mozSanitizingHTMLSerializer::AppendElementStart(nsIDOMElement *aElement,
                                                nsIDOMElement *aOriginalElement,
                                                nsAString& aStr)
{
  NS_ENSURE_ARG(aElement);

  mContent = do_QueryInterface(aElement);
  NS_ENSURE_TRUE(mContent, NS_ERROR_FAILURE);

  mOutputString = &aStr;

  PRInt32 id = GetIdForContent(mContent);

  PRBool isContainer = IsContainer(id);

  nsresult rv;
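  // Container elements get an open tag here and a matching close tag in
  // AppendElementEnd(); leaf elements are emitted in a single step.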
  if (isContainer) {
    rv = DoOpenContainer(id);
  }
  else {
    rv = DoAddLeaf(id, EmptyString());
  }

  mContent = 0;
  mOutputString = nsnull;

  return rv;
} 
Example #7
void Item::DeleteMe()
{
	//Don't inline me!
	if( IsContainer() ) {
		delete static_cast<Container*>(this);
	} else {
		ItemPool.PooledDelete( this );
	}
}
Example #8
File: Item.cpp Project: xiaofeng/Arcemu
void Item::DeleteMe()
{
	//Don't inline me!

    // check to see if our owner is instantiated
    if( this->m_owner != NULL )
        this->m_owner->GetItemInterface()->RemoveRefundable( this->GetGUID() );

	if( IsContainer() ) {
		delete static_cast<Container*>(this);
	} else {
		delete this;
	}
}
Example #9
File: Item.cpp Project: lev1976g/easywow
uint32 Item::GetSocketsCount()
{
    if (IsContainer()) // no sockets on containers.
        return 0;

    uint32 c = 0;
    for (uint32 x = 0; x < 3; x++)
        if (GetProto()->Sockets[x].SocketColor)
            c++;
    //prismatic socket
    if (GetEnchantment(PRISMATIC_ENCHANTMENT_SLOT) != NULL)
        c++;
    return c;
}
Example #10
Widget *Container::GetWidgetAt(const Point &pos)
{
	if (!Contains(pos)) return 0;

	for (auto end = m_widgets.rend(), it = m_widgets.rbegin(); it != end; ++it) {
		const auto widget = *it;
		const Point relpos = pos - widget->GetPosition() - widget->GetDrawOffset();
		if (widget->IsContainer()) {
			Widget* w = static_cast<Container*>(widget.Get())->GetWidgetAt(relpos);
			if (w) return w;
		} else if (widget->Contains(relpos))
			return widget.Get();
	}

	return this;
}
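A hypothetical dispatch snippet, assuming a top-level Container* named root and a Point mousePos already in root's coordinate space; OnClick() is a hypothetical handler name:

Widget *target = root->GetWidgetAt(mousePos);
if (target && target != root)
    target->OnClick();   // OnClick() is a hypothetical handler, not part of the API shown above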
Example #11
File: Item.cpp Project: lev1976g/easywow
void Item::DeleteFromDB()
{
    if (m_itemProto->ContainerSlots > 0 && IsContainer())
    {
        /* deleting a container */
        for (uint32 i = 0; i < m_itemProto->ContainerSlots; ++i)
        {
            if (TO< Container* >(this)->GetItem(static_cast<int16>(i)) != NULL)
            {
                /* abort the delete */
                return;
            }
        }
    }

    CharacterDatabase.Execute("DELETE FROM playeritems WHERE guid = %u", m_uint32Values[LOWGUID]);
}
Example #12
void PWidget::AddChild(PWidget *child)
{
    passert(IsContainer(), "I am not a container");
    passert(child, "there is no child");
    passert(parScreen, "no parent screen yet");

    child->setParent(this);
    child->setParentScreen(parScreen);

    //first child
    if (this->child == NULL)
    {
        this->child = child;
        return;
    }

    //add another child
    this->child->AddSiblings(child);
}
Example #13
void Container::CollectShortcuts(std::map<KeySym,Widget*> &shortcuts)
{
	{
	const std::set<KeySym> &widgetShortcuts = GetShortcuts();
	if (!widgetShortcuts.empty())
		for (std::set<KeySym>::const_iterator j = widgetShortcuts.begin(); j != widgetShortcuts.end(); ++j)
			shortcuts[*j] = this;
	}

	for (auto end = m_widgets.rend(), it = m_widgets.rbegin(); it != end; ++it) {
		const auto widget = *it;
		if (widget->IsContainer())
			static_cast<Container*>(widget.Get())->CollectShortcuts(shortcuts);
		else {
			const std::set<KeySym> &widgetShortcuts = widget->GetShortcuts();
			if (!widgetShortcuts.empty())
				for (std::set<KeySym>::const_iterator j = widgetShortcuts.begin(); j != widgetShortcuts.end(); ++j)
					shortcuts[*j] = widget.Get();
		}
	}
}
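A minimal dispatch sketch for the collected map, assuming a top-level Container* named root and a KeySym key decoded from the event loop; HandleShortcut() is a hypothetical handler:

std::map<KeySym, Widget*> shortcuts;
root->CollectShortcuts(shortcuts);

std::map<KeySym, Widget*>::const_iterator it = shortcuts.find(key);
if (it != shortcuts.end())
    it->second->HandleShortcut();   // hypothetical handler name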
Example #14
wxString TagEntry::FormatComment()
{
    if(m_isCommentForamtted) return m_formattedComment;
    m_isCommentForamtted = true;
    m_formattedComment.Clear();
    
    // Send the plugins an event requesting tooltip for this tag
    if(IsMethod()) {

        if(IsConstructor())
            m_formattedComment << wxT("<b>[Constructor]</b>\n");

        else if(IsDestructor())
            m_formattedComment << wxT("<b>[Destructor]</b>\n");

        TagEntryPtr p(new TagEntry(*this));
        m_formattedComment << wxT("<code>")
               << TagsManagerST::Get()->FormatFunction(p, FunctionFormat_WithVirtual | FunctionFormat_Arg_Per_Line)
               << wxT("</code>\n");
        m_formattedComment.Replace(GetName(), wxT("<b>") + GetName() + wxT("</b>"));
    } else if(IsClass()) {

        m_formattedComment << wxT("<b>Kind:</b> ");
        m_formattedComment << GetKind() << "\n";

        if(GetInheritsAsString().IsEmpty() == false) {
            m_formattedComment << wxT("<b>Inherits:</b> ");
            m_formattedComment << GetInheritsAsString() << wxT("\n");
        }

    } else if(IsMacro() || IsTypedef() || IsContainer() || GetKind() == wxT("member") ||
              GetKind() == wxT("variable")) {

        m_formattedComment << wxT("<b>Kind:</b> ");
        m_formattedComment << GetKind() << "\n";

        m_formattedComment << wxT("<b>Match Pattern:</b> ");

        // Prettify the match pattern
        wxString matchPattern(GetPattern());
        matchPattern.Trim().Trim(false);

        if(matchPattern.StartsWith(wxT("/^"))) {
            matchPattern.Replace(wxT("/^"), wxT(""));
        }

        if(matchPattern.EndsWith(wxT("$/"))) {
            matchPattern.Replace(wxT("$/"), wxT(""));
        }

        matchPattern.Replace(wxT("\t"), wxT(" "));
        while(matchPattern.Replace(wxT("  "), wxT(" "))) {
        }

        matchPattern.Trim().Trim(false);

        // BUG#3082954: limit the size of the 'match pattern' to a reasonable size (200 chars)
        matchPattern = TagsManagerST::Get()->WrapLines(matchPattern);
        matchPattern.Replace(GetName(), wxT("<b>") + GetName() + wxT("</b>"));
        m_formattedComment << wxT("<code>") << matchPattern << wxT("</code>\n");

    }

    // Add comment section
    if(!GetFile().IsEmpty()) {
        
        CommentParseResult comments;
        ::ParseComments(GetFile().mb_str(wxConvUTF8).data(), comments);
    
        // search for comment in the current line, the line above it and 2 above it
        // use the first match we got
        for(size_t i = 0; i < 3; i++) {
            wxString comment = comments.getCommentForLine(GetLine()-i);
            if(!comment.IsEmpty()) {
                SetComment(comment);
                break;
            }
        }
    }
    
    if(!GetComment().IsEmpty()) {
        wxString theComment;
        theComment = GetComment();

        theComment = TagsManagerST::Get()->WrapLines(theComment);
        theComment.Trim(false);
        wxString tagComment = wxString::Format(wxT("%s\n"), theComment.c_str());
        if(m_formattedComment.IsEmpty() == false) {
            m_formattedComment.Trim().Trim(false);
            m_formattedComment << wxT("\n<hr>");
        }
        m_formattedComment << tagComment;
    }

    // Update all "doxy" comments and surround them with <green> tags
    static wxRegEx reDoxyParam("([@\\\\]{1}param)[ \t]+([_a-z][a-z0-9_]*)?", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDoxyBrief("([@\\\\]{1}(brief|details))[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDoxyThrow("([@\\\\]{1}(throw|throws))[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDoxyReturn("([@\\\\]{1}(return|retval|returns))[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDoxyToDo("([@\\\\]{1}todo)[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDoxyRemark("([@\\\\]{1}(remarks|remark))[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reDate("([@\\\\]{1}date)[ \t]*", wxRE_DEFAULT | wxRE_ICASE);
    static wxRegEx reFN("([@\\\\]{1}fn)[ \t]*", wxRE_DEFAULT | wxRE_ICASE);

    if(reDoxyParam.IsValid() && reDoxyParam.Matches(m_formattedComment)) {
        reDoxyParam.ReplaceAll(&m_formattedComment, "\n<b>Parameter</b>\n<i>\\2</i>");
    }

    if(reDoxyBrief.IsValid() && reDoxyBrief.Matches(m_formattedComment)) {
        reDoxyBrief.ReplaceAll(&m_formattedComment, "");
    }

    if(reDoxyThrow.IsValid() && reDoxyThrow.Matches(m_formattedComment)) {
        reDoxyThrow.ReplaceAll(&m_formattedComment, "\n<b>Throws</b>\n");
    }

    if(reDoxyReturn.IsValid() && reDoxyReturn.Matches(m_formattedComment)) {
        reDoxyReturn.ReplaceAll(&m_formattedComment, "\n<b>Returns</b>\n");
    }

    if(reDoxyToDo.IsValid() && reDoxyToDo.Matches(m_formattedComment)) {
        reDoxyToDo.ReplaceAll(&m_formattedComment, "\n<b>TODO</b>\n");
    }

    if(reDoxyRemark.IsValid() && reDoxyRemark.Matches(m_formattedComment)) {
        reDoxyRemark.ReplaceAll(&m_formattedComment, "\n  ");
    }

    if(reDate.IsValid() && reDate.Matches(m_formattedComment)) {
        reDate.ReplaceAll(&m_formattedComment, "<b>Date</b> ");
    }

    if(reFN.IsValid() && reFN.Matches(m_formattedComment)) {
        size_t fnStart, fnLen, fnEnd;
        if(reFN.GetMatch(&fnStart, &fnLen)) {
            fnEnd = m_formattedComment.find('\n', fnStart);
            if(fnEnd != wxString::npos) {
                // remove the string from fnStart -> fnEnd (including the terminating \n)
                m_formattedComment.Remove(fnStart, (fnEnd - fnStart) + 1);
            }
        }
    }

    // if nothing to display skip this
    m_formattedComment.Trim().Trim(false);
    return m_formattedComment;
}
Example #15
int main(int argc, char **argv)
{
  char *agentDesc = "Bucket agent";
  int cmdopt;
  int verbose = 0;
  int ReadFromStdin = 1;
  int head_uploadtree_pk = 0;
  PGconn *pgConn;
  PGresult *topresult;
  PGresult *result;
  char sqlbuf[512];
  char *Delims = ",= \t\n\r";
  char *token, *saveptr;
  int agent_pk = 0;
  int nomos_agent_pk = 0;
  int bucketpool_pk = 0;
  int ars_pk = 0;
  int readnum = 0;
  int rv;
  int hasPrules;
  int user_pk = 0;
  char *bucketpool_name;
  char *COMMIT_HASH;
  char *VERSION;
  char *uploadtree_tablename;
  char agent_rev[myBUFSIZ];
  int rerun = 0;


//  int *bucketList;
  pbucketdef_t bucketDefArray = 0;
  pbucketdef_t tmpbucketDefArray = 0;
  cacheroot_t  cacheroot;
  uploadtree_t  uploadtree;
  uploadtree.upload_fk = 0;

  /* connect to the scheduler */
  fo_scheduler_connect(&argc, argv, &pgConn);
  user_pk = fo_scheduler_userID(); /* get user_pk for user who queued the agent */

  /* command line options */
  while ((cmdopt = getopt(argc, argv, "rin:p:t:u:vc:hV")) != -1)
  {
    switch (cmdopt)
    {
      case 'i': /* "Initialize" */
            PQfinish(pgConn);
            exit(0);
      case 'n': /* bucketpool_name  */
            ReadFromStdin = 0;
            bucketpool_name = optarg;
            /* find the highest rev active bucketpool_pk */
            if (!bucketpool_pk)
            {
              bucketpool_pk = getBucketpool_pk(pgConn, bucketpool_name);
              if (!bucketpool_pk)
                printf("%s is not an active bucketpool name.\n", bucketpool_name);
            }
            break;
      case 'p': /* bucketpool_pk */
            ReadFromStdin = 0;
            bucketpool_pk = atoi(optarg);
            /* validate bucketpool_pk */
            sprintf(sqlbuf, "select bucketpool_pk from bucketpool where bucketpool_pk=%d and active='Y'", bucketpool_pk);
            bucketpool_pk = validate_pk(pgConn, sqlbuf);
            if (!bucketpool_pk)
              printf("%d is not an active bucketpool_pk.\n", atoi(optarg));
            break;
      case 't': /* uploadtree_pk */
            ReadFromStdin = 0;
            if (uploadtree.upload_fk) break;
            head_uploadtree_pk = atoi(optarg);
            /* validate bucketpool_pk */
            sprintf(sqlbuf, "select uploadtree_pk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
            head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
            if (!head_uploadtree_pk)
              printf("%d is not an active uploadtree_pk.\n", atoi(optarg));
            break;
      case 'u': /* upload_pk */
            ReadFromStdin = 0;
            if (!head_uploadtree_pk)
            {
              uploadtree.upload_fk = atoi(optarg);
              /* validate upload_pk  and get uploadtree_pk  */
              sprintf(sqlbuf, "select upload_pk from upload where upload_pk=%d", uploadtree.upload_fk);
              uploadtree.upload_fk = validate_pk(pgConn, sqlbuf);
              if (!uploadtree.upload_fk)
                printf("%d is not an valid upload_pk.\n", atoi(optarg));
              else
              {
                sprintf(sqlbuf, "select uploadtree_pk from uploadtree where upload_fk=%d and parent is null", uploadtree.upload_fk);
                head_uploadtree_pk = validate_pk(pgConn, sqlbuf);
              }
            }
            break;
      case 'v': /* verbose output for debugging  */
            verbose++;
            break;
      case 'c': break; /* handled by fo_scheduler_connect() */
      case 'r':
            rerun = 1; /** rerun bucket */
            break;
      case 'V': /* print version info */
            printf("%s", BuildVersion);
            PQfinish(pgConn);
            exit(0);
      default:
            Usage(argv[0]);
            PQfinish(pgConn);
            exit(-1);
    }
  }
  debug = verbose;

  /*** validate command line ***/
  if (!bucketpool_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify an active bucketpool.\n");
    Usage(argv[0]);
    exit(-1);
  }
  if (!head_uploadtree_pk && !ReadFromStdin)
  {
    printf("FATAL: You must specify a valid uploadtree_pk or upload_pk.\n");
    Usage(argv[0]);
    exit(-1);
  }

  /* get agent pk
   * Note, if GetAgentKey fails, this process will exit.
   */
  COMMIT_HASH = fo_sysconfig("buckets", "COMMIT_HASH");
  VERSION = fo_sysconfig("buckets", "VERSION");
  sprintf(agent_rev, "%s.%s", VERSION, COMMIT_HASH);
  agent_pk = fo_GetAgentKey(pgConn, basename(argv[0]), uploadtree.upload_fk, agent_rev, agentDesc);

  /*** Initialize the license_ref table cache ***/
  /* Build the license ref cache to hold 2<<11 (4096) licenses.
     This MUST be a power of 2.
   */
  cacheroot.maxnodes = 2<<11;
  cacheroot.nodes = calloc(cacheroot.maxnodes, sizeof(cachenode_t));
  if (!lrcache_init(pgConn, &cacheroot))
  {
    printf("FATAL: Bucket agent could not allocate license_ref table cache.\n");
    exit(1);
  }


  /* main processing loop */
  while(++readnum)
  {
    uploadtree.upload_fk = 0;
    if (ReadFromStdin)
    {
      bucketpool_pk = 0;

      /* Read the bucketpool_pk and upload_pk from stdin.
       * Format looks like 'bppk=123, upk=987'
       */
      if (!fo_scheduler_next()) break;

      token = strtok_r(fo_scheduler_current(), Delims, &saveptr);
      while (token && (!uploadtree.upload_fk || !bucketpool_pk))
      {
        if (strcmp(token, "bppk") == 0)
        {
          bucketpool_pk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        else
        if (strcmp(token, "upk") == 0)
        {
          uploadtree.upload_fk = atoi(strtok_r(NULL, Delims, &saveptr));
        }
        token = strtok_r(NULL, Delims, &saveptr);
      }

      /* Check Permissions */
      if (GetUploadPerm(pgConn, uploadtree.upload_fk, user_pk) < PERM_WRITE)
      {
        LOG_ERROR("You have no update permissions on upload %d", uploadtree.upload_fk);
        continue;
      }

      /* From the upload_pk, get the head of the uploadtree, pfile_pk and ufile_name  */
      sprintf(sqlbuf, "select uploadtree_pk, pfile_fk, ufile_name, ufile_mode,lft,rgt from uploadtree \
             where upload_fk='%d' and parent is null limit 1", uploadtree.upload_fk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__)) return -1;
      if (PQntuples(topresult) == 0)
      {
        printf("ERROR: %s.%s missing upload_pk %d.\nsql: %s",
               __FILE__, agentDesc, uploadtree.upload_fk, sqlbuf);
        PQclear(topresult);
        continue;
      }
      head_uploadtree_pk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 1));
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 2));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    } /* end ReadFromStdin */
    else
    {
      /* Only one input to process if from command line, so terminate if it's been done */
      if (readnum > 1) break;

      /* not reading from stdin
       * Get the pfile, and ufile_name for head_uploadtree_pk
       */
      sprintf(sqlbuf, "select pfile_fk, ufile_name, ufile_mode,lft,rgt, upload_fk from uploadtree where uploadtree_pk=%d", head_uploadtree_pk);
      topresult = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, topresult, sqlbuf, agentDesc, __LINE__))
      {
        free(uploadtree.ufile_name);
        return -1;
      }
      if (PQntuples(topresult) == 0)
      {
        printf("FATAL: %s.%s missing root uploadtree_pk %d\n",
               __FILE__, agentDesc, head_uploadtree_pk);
        PQclear(topresult);
        continue;
      }
      uploadtree.uploadtree_pk = head_uploadtree_pk;
      uploadtree.pfile_fk = atol(PQgetvalue(topresult, 0, 0));
      uploadtree.ufile_name = strdup(PQgetvalue(topresult, 0, 1));
      uploadtree.ufile_mode = atoi(PQgetvalue(topresult, 0, 2));
      uploadtree.lft = atoi(PQgetvalue(topresult, 0, 3));
      uploadtree.rgt = atoi(PQgetvalue(topresult, 0, 4));
      uploadtree.upload_fk = atoi(PQgetvalue(topresult, 0, 5));
      PQclear(topresult);
    }

    /* Find the most recent nomos data for this upload.  That's what we want to use
         to process the buckets.
     */
    nomos_agent_pk = LatestNomosAgent(pgConn, uploadtree.upload_fk);
    if (nomos_agent_pk == 0)
    {
      printf("WARNING: Bucket agent called on treeitem (%d), but the latest nomos agent hasn't created any license data for this tree.\n",
            head_uploadtree_pk);
      continue;
    }

    /* at this point we know:
     * bucketpool_pk, bucket agent_pk, nomos agent_pk, upload_pk,
     * pfile_pk, and head_uploadtree_pk (the uploadtree_pk of the head tree to scan)
     */

    /* Has the upload already been processed?  If so, we are done.
       Don't even bother to create a bucket_ars entry.
     */
    switch (UploadProcessed(pgConn, agent_pk, nomos_agent_pk, uploadtree.pfile_fk, head_uploadtree_pk, uploadtree.upload_fk, bucketpool_pk))
    {
      case 1:  /* upload has already been processed */
        if (1 == rerun) break;
        printf("LOG: Duplicate request for bucket agent to process upload_pk: %d, uploadtree_pk: %d, bucketpool_pk: %d, bucket agent_pk: %d, nomos agent_pk: %d, pfile_pk: %d ignored.\n",
             uploadtree.upload_fk, head_uploadtree_pk, bucketpool_pk, agent_pk, nomos_agent_pk, uploadtree.pfile_fk);
        continue;
      case -1: /* SQL error, UploadProcessed() wrote error message */
        continue;
      case 0:  /* upload has not been processed */
        break;
    }

    /*** Initialize the Bucket Definition List bucketDefArray  ***/
    bucketDefArray = initBuckets(pgConn, bucketpool_pk, &cacheroot);
    if (bucketDefArray == 0)
    {
      printf("FATAL: %s.%d Bucket definition for pool %d could not be initialized.\n",
             __FILE__, __LINE__, bucketpool_pk);
      exit(-2);
    }
    bucketDefArray->nomos_agent_pk = nomos_agent_pk;
    bucketDefArray->bucket_agent_pk = agent_pk;

    /* Find the correct uploadtree table name */
    uploadtree_tablename = GetUploadtreeTableName(pgConn, uploadtree.upload_fk);
    if (!(uploadtree_tablename))
    {
      LOG_FATAL("buckets passed invalid upload, upload_pk = %d", uploadtree.upload_fk);
      return(-110);
    }

    /* set uploadtree_tablename in all the bucket definition structs */
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
    {
      tmpbucketDefArray->uploadtree_tablename = uploadtree_tablename;
    }

    /* loop through rules (bucket defs) to see if there are any package only rules */
    hasPrules = 0;
    for (tmpbucketDefArray = bucketDefArray; tmpbucketDefArray->bucket_pk; tmpbucketDefArray++)
      if (tmpbucketDefArray->applies_to == 'p')
      {
        hasPrules = 1;
        break;
      }

    /*** END initializing bucketDefArray  ***/

    /*** Initialize DEB_SOURCE and DEB_BINARY  ***/
    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-package'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-package mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_BINARY = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);

    sprintf(sqlbuf, "select mimetype_pk from mimetype where mimetype_name='application/x-debian-source'");
    result = PQexec(pgConn, sqlbuf);
    if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
    if (PQntuples(result) == 0)
    {
      printf("FATAL: (%s.%d) Missing application/x-debian-source mimetype.\n",__FILE__,__LINE__);
      return -1;
    }
    DEB_SOURCE = atoi(PQgetvalue(result, 0, 0));
    PQclear(result);
    /*** END Initialize DEB_SOURCE and DEB_BINARY  ***/

    /*** Record analysis start in bucket_ars, the bucket audit trail. ***/
    if (0 == rerun) { // do not have any bucket scan on this upload
      snprintf(sqlbuf, sizeof(sqlbuf),
          "insert into bucket_ars (agent_fk, upload_fk, ars_success, nomosagent_fk, bucketpool_fk) values(%d,%d,'%s',%d,%d)",
          agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);

      /* retrieve the ars_pk of the newly inserted record */
      sprintf(sqlbuf, "select ars_pk from bucket_ars where agent_fk='%d' and upload_fk='%d' and ars_success='%s' and nomosagent_fk='%d' \
          and bucketpool_fk='%d' and ars_endtime is null \
          order by ars_starttime desc limit 1",
          agent_pk, uploadtree.upload_fk, "false", nomos_agent_pk, bucketpool_pk);
      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQresult(pgConn, result, sqlbuf, __FILE__, __LINE__)) return -1;
      if (PQntuples(result) == 0)
      {
        printf("FATAL: (%s.%d) Missing bucket_ars record.\n%s\n",__FILE__,__LINE__,sqlbuf);
        return -1;
      }
      ars_pk = atol(PQgetvalue(result, 0, 0));
      PQclear(result);
    }
    /*** END bucket_ars insert  ***/

    if (debug) printf("%s sql: %s\n",__FILE__, sqlbuf);

    /* process the tree for buckets
       Do this as a single transaction, therefore this agent must be
       run as a single thread.  This will prevent the scheduler from
       consuming excess time (this is a fast agent), and allow this
       process to update bucket_ars.
     */
    rv = walkTree(pgConn, bucketDefArray, agent_pk, head_uploadtree_pk, 0,
             hasPrules);
    /* if no errors and top level is a container, process the container */
    if ((!rv) && (IsContainer(uploadtree.ufile_mode)))
    {
      rv = processFile(pgConn, bucketDefArray, &uploadtree, agent_pk, hasPrules);
    }

    /* Record analysis end in bucket_ars, the bucket audit trail. */
    if (0 == rerun && ars_pk)
    {
      if (rv)
        snprintf(sqlbuf, sizeof(sqlbuf),
                "update bucket_ars set ars_endtime=now(), ars_success=false where ars_pk='%d'",
                ars_pk);
      else
        snprintf(sqlbuf, sizeof(sqlbuf),
                "update bucket_ars set ars_endtime=now(), ars_success=true where ars_pk='%d'",
                ars_pk);

      if (debug)
        printf("%s(%d): %s\n", __FILE__, __LINE__, sqlbuf);

      result = PQexec(pgConn, sqlbuf);
      if (fo_checkPQcommand(pgConn, result, sqlbuf, __FILE__ ,__LINE__)) return -1;
      PQclear(result);
    }
  }  /* end of main processing loop */