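// Advance to the next job we are willing to offer to the negotiator,
// filling in m_current_job_id and m_current_job_ad.  Returns false when
// there is nothing left to offer; note it also returns true, as a no-op,
// while a previously sent resource request list is still outstanding.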
bool
ScheddNegotiate::nextJob()
{
	if ( m_num_resource_reqs_sent > 0 && m_num_resource_reqs_to_send == 0 ) {
		// If we have already sent a list of resource requests via
		// sendResourceRequestList(), and the negotiator is not currently
		// asking us for more requests (or jobs), then nextJob() should be
		// a no-op: when using resource request lists, calls to nextJob()
		// only matter while we are actually building such a list.
		// Note that when talking to a negotiator older than v8.3.0,
		// m_num_resource_reqs_sent will always be 0.
		return true;
	}

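	// Walk the pending resource request clusters, offering jobs until we
	// run out of clusters or exhaust m_jobs_can_offer, our remaining
	// headroom under internal limits such as MAX_JOBS_RUNNING.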
	while( !m_jobs->empty() && m_jobs_can_offer ) {
		ResourceRequestCluster *cluster = m_jobs->front();
		ASSERT( cluster );

		m_current_auto_cluster_id = cluster->getAutoClusterId();

		if( !getAutoClusterRejected(m_current_auto_cluster_id) ) {
			while( cluster->popJob(m_current_job_id) ) {
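				// Give the scheduler a chance to veto jobs it does not
				// want negotiated right now.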
				if( !scheduler_skipJob(m_current_job_id) ) {

					if( !scheduler_getJobAd( m_current_job_id, m_current_job_ad ) )
					{
						dprintf(D_FULLDEBUG,
							"skipping job %d.%d because it no longer exists\n",
							m_current_job_id.cluster,m_current_job_id.proc);
					}
					else {
						// Insert the number of jobs remaining in this
						// resource request cluster into the ad; the
						// negotiator may use this information to give us
						// more than one match at a time.
						// [[Future optimization idea: there may be jobs in this resource request
						// cluster that no longer exist in the queue; perhaps we should
						// iterate through them and make sure they still exist to prevent
						// asking the negotiator for more resources than we can really
						// use at the moment. ]]
						// - Todd July 2013 <*****@*****.**>
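						// If the universe attribute is missing from the ad,
						// universe keeps the CONDOR_UNIVERSE_MIN default.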
						int universe = CONDOR_UNIVERSE_MIN;
						m_current_job_ad.LookupInteger(ATTR_JOB_UNIVERSE,universe);
						// For now, do not use request counts with the dedicated scheduler
						if ( universe != CONDOR_UNIVERSE_PARALLEL ) {
							// add one to cluster size to cover the current popped job
							int resource_count = 1+cluster->size();
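							// Cap the request at our remaining headroom;
							// a negative m_jobs_can_offer (i.e. no limit
							// in effect) leaves the count uncapped.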
							if( resource_count > m_jobs_can_offer && m_jobs_can_offer > 0 ) {
								dprintf(D_FULLDEBUG, "Offering %d jobs instead of %d to the negotiator for this cluster; nearing internal limits (MAX_JOBS_RUNNING, etc).\n", m_jobs_can_offer, resource_count);
								resource_count = m_jobs_can_offer;
							}
							m_jobs_can_offer -= resource_count;
							m_current_job_ad.Assign(ATTR_RESOURCE_REQUEST_COUNT,resource_count);
						}
						else {
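							// No request count is sent for the dedicated
							// scheduler, so this offer only consumes one
							// job from our headroom.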
							m_jobs_can_offer--;
						}

						// Copy attributes from the chained parent ad into our
						// copy, so that if the parent ad is deleted before we
						// finish negotiation, we don't crash trying to access
						// a deleted parent ad.
						m_current_job_ad.ChainCollapse();
						return true;
					}
				}
			}
		}

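		// This cluster is exhausted or its auto cluster was rejected;
		// discard it and move on to the next one.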
		m_jobs->pop_front();
		delete cluster;
	}
	if( !m_jobs_can_offer ) {
		dprintf(D_FULLDEBUG, "Not offering any more jobs to the negotiator because I am nearing the internal limits (MAX_JOBS_RUNNING, etc).\n");
	}

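	// Nothing left to offer: reset the current ids to sentinel values.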
	m_current_auto_cluster_id = -1;
	m_current_job_id.cluster = -1;
	m_current_job_id.proc = -1;

	return false;
}