Example #1
0
// Fast checks (no DB access) to see if the job can be sent to the host.
// Reasons why not include:
// 1) the host doesn't have enough memory;
// 2) the host doesn't have enough disk space;
// 3) based on CPU speed, resource share and estimated delay,
//    the host probably won't get the result done within the delay bound
// 4) app isn't in user's "approved apps" list
//
// If the job is feasible, return 0 and fill in wu.delay_bound
// with the delay bound we've decided to use.
//
int wu_is_infeasible_fast(
    WORKUNIT& wu,
    int res_server_state, int res_priority, double res_report_deadline,
    APP& app, BEST_APP_VERSION& bav
) {
    int rc;

    // Project-defined screening hook; runs before everything else.
    //
    if (wu_is_infeasible_custom(wu, app, bav)) {
        return INFEASIBLE_CUSTOM;
    }

    // Per-user batch filter: a batch-tagged WU may only be sent
    // to the user whose ID matches the batch field.
    //
    if (config.user_filter && wu.batch && (wu.batch != g_reply->user.id)) {
        return INFEASIBLE_USER_FILTER;
    }

    // Homogeneous redundancy: when the app uses HR we can't send if
    // the host's HR class is unknown, or if the WU has already been
    // committed to a different HR class.
    //
    if (app_hr_type(app)) {
        if (hr_unknown_class(g_reply->host, app_hr_type(app))) {
            if (config.debug_send) {
                log_messages.printf(MSG_NORMAL,
                    "[send] [HOST#%d] [WU#%u %s] host is of unknown class in HR type %d\n",
                    g_reply->host.id, wu.id, wu.name, app_hr_type(app)
                );
            }
            return INFEASIBLE_HR;
        }
        if (already_sent_to_different_hr_class(wu, app)) {
            if (config.debug_send) {
                log_messages.printf(MSG_NORMAL,
                    "[send] [HOST#%d] [WU#%u %s] failed quick HR check: WU is class %d, host is class %d\n",
                    g_reply->host.id, wu.id, wu.name, wu.hr_class, hr_class(g_request->host, app_hr_type(app))
                );
            }
            return INFEASIBLE_HR;
        }
    }

    // Homogeneous app version: once a WU is tied to an app version,
    // every instance must use that same version.
    //
    if (app.homogeneous_app_version) {
        int required_avid = wu.app_version_id;
        if (required_avid && (bav.avp->id != required_avid)) {
            if (config.debug_send) {
                log_messages.printf(MSG_NORMAL,
                    "[send] [HOST#%d] [WU#%u %s] failed homogeneous app version check: %d %d\n",
                    g_reply->host.id, wu.id, wu.name, required_avid, bav.avp->id
                );
            }
            return INFEASIBLE_HAV;
        }
    }

    // Don't put two results of the same WU in one reply when either
    // one-result-per-user or one-result-per-host is configured.
    //
    if (config.one_result_per_user_per_wu || config.one_result_per_host_per_wu) {
        if (wu_already_in_reply(wu)) {
            return INFEASIBLE_DUP;
        }
    }

    // Cheap host-resource checks.
    //
    if ((rc = check_memory(wu))) return rc;
    if ((rc = check_disk(wu))) return rc;
    if ((rc = check_bandwidth(wu))) return rc;

    // Non-CPU-intensive apps have no deadline concerns.
    //
    if (app.non_cpu_intensive) {
        return 0;
    }

    // Deadline check goes last since the EDF simulation costs CPU.
    //
    double opt_delay, pess_delay;
    get_delay_bound_range(
        wu, res_server_state, res_priority, res_report_deadline, bav,
        opt_delay, pess_delay
    );
    wu.delay_bound = (int)opt_delay;
    if (opt_delay == 0) {
        // resend: skip the deadline check entirely
        return 0;
    }
    rc = check_deadline(wu, app, bav);
    if (rc && (opt_delay != pess_delay)) {
        // optimistic bound failed; retry once with the pessimistic bound
        wu.delay_bound = (int)pess_delay;
        rc = check_deadline(wu, app, bav);
    }
    return rc;
}
// Dispatch a sim/campaign coordination message (aggregation state changes,
// deaggregation data transfer, ownership changes) to the campaign entity
// named in the message.
//
// autodisp: nonzero when the message is being auto-dispatched; such
//           messages are ignored here.
// Returns 0 in all cases.
int FalconSimCampMessage::Process(uchar autodisp)
{
	// Resolve the target entity and the sending session from the VU database.
	CampEntity				ent = (CampEntity)vuDatabase->Find(EntityId());
	FalconSessionEntity		*session = (FalconSessionEntity*) vuDatabase->Find(dataBlock.from);
	
	// Nothing to do if auto-dispatched, if either lookup failed, or if
	// there is no local game. Bails out before taking the critical section.
	if(autodisp || !ent || !session || !FalconLocalGame)
		return 0;
	
	// All entity-state changes below happen under the camp critical section.
	CampEnterCriticalSection();
	switch (dataBlock.message)
	{
	case simcampReaggregate:
		// Reaggregate only if check_bandwidth(150) passes — presumably a
		// "can we afford ~150 bytes" throttle; confirm against its definition.
		if (check_bandwidth (150))
		{
			ent->Reaggregate(session);
			//MonoPrint ("Reag   %d\n", ent->Id().num_);
		}
		break;

	case simcampDeaggregate:

// OW: me123 MP Fix
#if 0
		if (check_bandwidth (150))
		{
#else
		// me123 MP fix: deaggregate even without bandwidth headroom when the
		// entity has players (or one is entering), so players aren't stalled.
		if (check_bandwidth (150)|| ent->IsSetFalcFlag(FEC_PLAYER_ENTERING|FEC_HASPLAYERS))//me123
		{//me123 addet the player check
			//me123 send player deags so they can go past pie 2 fast
#endif
			//MonoPrint ("Deag   %d\n", ent->Id().num_);
			ent->Deaggregate(session);
		}
		else
		{
			//MonoPrint ("NoDeag %d\n", ent->Id().num_)
		}
		break;

	case simcampChangeOwner:
		// Snapshot current state, then hand deaggregation ownership of the
		// entity to the sending session.
		//			MonoPrint ("Sim Camp Change Owner %08x %08x%08x\n", ent, session->Id ());
		ent->RecordCurrentState (session, FALSE);
		ent->SetDeagOwner (session->Id ());
		break;
		
	case simcampRequestDeagData:
		// Sender wants this entity's deaggregation data; broadcast it via
		// the local game.
		// MonoPrint ("Request Deag Data\n");
		ent->SendDeaggregateData(FalconLocalGame);
		break;

	case simcampReaggregateFromData:
		// Apply reaggregation state carried in the message payload.
		ent->ReaggregateFromData(dataBlock.size,dataBlock.data);
		break;

	case simcampDeaggregateFromData:
		// Apply deaggregation state carried in the message payload.
		ent->DeaggregateFromData(dataBlock.size,dataBlock.data);
		break;

	case simcampChangeOwnerFromData:
		// Intentionally unhandled.
		break;

	case simcampRequestAllDeagData:
		{
			// A client requesting all deag data is presumably entering the
			// 3D world: send data for every locally-owned, already
			// deaggregated entity.
			SetTimeCompression(1);//me123 if a client is callign this he's in the pie
			//let's set the compresion to 1 on the host so we don'e f**k up the realtime
			//becourse the clients stops transmitting timecompresion and we go to 64 again for awhile.
			int count = 0;
			VuListIterator	deag_it(DeaggregateList);
			CampEntity c;
			c = (CampEntity) deag_it.GetFirst();
			while (c)
			{
				if ((!c->IsAggregate()) && (c->IsLocal()))
				{
					c->SendDeaggregateData(FalconLocalGame);
					count ++;
				}
				c = (CampEntity) deag_it.GetNext();
			}
			
			//			MonoPrint ("Request All Deag Data = %d\n", count);
			break;
		}
	}
	CampLeaveCriticalSection();
	
	return 0;
}