Example 1
// guided_takeoff_run - takeoff in guided mode
//      called by guided_run at 100hz or more
void Copter::ModeGuided::takeoff_run()
{
    auto_takeoff_run();
    if (wp_nav->reached_wp_destination()) {
        // take-off target reached: re-send the current waypoint destination
        // so the controller holds position there
        const Vector3f target = wp_nav->get_wp_destination();
        set_destination(target);
    }
}
Example 2
void show_navigation_info_window(const char *destination, uint32_t distance, time_t eta_timestamp) {
    initialise_ui();
    set_destination(destination);
    set_distance(distance);
    set_eta(eta_timestamp);
    window_set_window_handlers(s_window, (WindowHandlers) {
        .unload = handle_window_unload,
    });
}
Example 3
// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); 
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  assert(!os::is_MP() || ((uintptr_t)displacement_address() / cache_line_size ==
      ((uintptr_t)displacement_address()+3) / cache_line_size), "destination should be aligned");
  if ((uintptr_t)displacement_address() / cache_line_size ==
      ((uintptr_t)displacement_address()+3) / cache_line_size) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
	     ((uintptr_t)instruction_address()+1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
    int disp = dest - return_address();
    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      unsigned char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self
      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }

    OrderAccess::fence();
    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    unsigned char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int*)&patch_disp[1] = disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    OrderAccess::fence();
    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)&patch_disp[0];
    
    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
  ICache::invalidate_range(instruction_address(), instruction_size);
}
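The protocol above (park readers on a jmp-to-self, publish the displacement, then restore the opcode, with a fence between steps) is the same ordering discipline a seqlock uses to publish a multi-word value to lock-free readers. Below is a minimal, self-contained C++ sketch of that pattern; the names are hypothetical, it is not the HotSpot code, and like the original it assumes a single writer (the role the Patching_lock plays above).

#include <atomic>
#include <cstdint>

// Single-writer seqlock: an odd sequence number plays the role of the
// jmp-to-self, telling readers the payload is momentarily inconsistent.
struct SeqlockSlot {
    std::atomic<uint32_t> seq{0};       // odd = write in progress
    std::atomic<int32_t>  words[4]{};   // multi-word "displacement"

    void write(const int32_t (&v)[4]) {
        uint32_t s = seq.load(std::memory_order_relaxed);
        seq.store(s + 1, std::memory_order_relaxed);           // "park" readers
        std::atomic_thread_fence(std::memory_order_release);   // first fence
        for (int i = 0; i < 4; i++)
            words[i].store(v[i], std::memory_order_relaxed);   // publish body
        seq.store(s + 2, std::memory_order_release);           // "unpark"
    }

    void read(int32_t (&out)[4]) const {
        for (;;) {
            uint32_t s0 = seq.load(std::memory_order_acquire);
            if (s0 & 1) continue;                              // writer active
            for (int i = 0; i < 4; i++)
                out[i] = words[i].load(std::memory_order_relaxed);
            std::atomic_thread_fence(std::memory_order_acquire);
            if (seq.load(std::memory_order_relaxed) == s0) return;
        }
    }
};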
Example 4
void forge_ipv4_header( void* buf , struct sockaddr* destination , uint8_t protonum ) {
    ipv4_header_t* iph = (ipv4_header_t*) buf;
    memset( buf , 0 , sizeof( ipv4_header_t ) );
    iph->version = 4;
    iph->IHL = 5;
    set_destination( iph , (struct sockaddr_in *) destination );
    set_source( iph , inet_addr(SOURCEIP) );
    set_TTL(iph, 80);
    set_protocol( iph, protonum );
    set_packet_length( iph , 28 );  // size of an ICMP echo request
    set_ipv4_checksum( iph );
}
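For reference, the checksum a helper like set_ipv4_checksum() has to produce is the RFC 1071 ones'-complement sum over the header's 16-bit words, computed with the checksum field zeroed first. A standalone sketch, where the function name and layout are illustrative rather than taken from the project above:

#include <cstdint>
#include <cstddef>

// RFC 1071 checksum over `len` bytes; for an IPv4 header, run it over the
// 20-byte header with the checksum field set to 0, then store the result
// in network byte order.
static uint16_t rfc1071_checksum(const void* data, size_t len) {
    const uint8_t* p = static_cast<const uint8_t*>(data);
    uint32_t sum = 0;
    while (len > 1) {                       // sum 16-bit big-endian words
        sum += (static_cast<uint32_t>(p[0]) << 8) | p[1];
        p += 2;
        len -= 2;
    }
    if (len == 1)                           // odd trailing byte, zero-padded
        sum += static_cast<uint32_t>(p[0]) << 8;
    while (sum >> 16)                       // fold carries into low 16 bits
        sum = (sum & 0xFFFF) + (sum >> 16);
    return static_cast<uint16_t>(~sum);     // ones' complement of the sum
}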
Example 5
File: avp.c Project: 2pac/kamailio
static int xlset_destination(struct sip_msg* msg, char* format, char* p2)
{
    str val;

    if (xl_printstr(msg, (xl_elog_t*) format, &val.s, &val.len) > 0) {
        DBG("Setting dest to: '%.*s'\n", val.len, val.s);
        if (set_destination(msg, &val) == 0) {
            return 1;
        }
    }

    return -1;
}
Example 6
LocalZipTask::LocalZipTask(rc_ptr<ThreadPool> tp, TimerManager *tmgr, rc_ptr<XmlElement> config)
    : AbstractSyncTask(tp, tmgr)
{
    assert(tp.is_not_null() && NULL != tmgr && config.is_not_null());
    const string& name = config->get_name();
    set_name(QString::fromLocal8Bit(name.data(), name.length()));

    rc_ptr<XmlElement> x = config->get_child("source");
    if (x.is_not_null())
    {
        const string& text = x->get_text();
        set_source(QString::fromLocal8Bit(text.data(), text.length()));
    }
    else
    {
        NUT_LOG_E("plugin.syncer.localzip", "source path is missing for task %s",
            get_name().toLocal8Bit().data());
    }

    x = config->get_child("destination");
    if (x.is_not_null())
    {
        const string text = x->get_text();
        set_destination(QString::fromLocal8Bit(text.data(), text.length()));
    }
    else
    {
        NUT_LOG_E("plugin.syncer.localzip", "destination path is missing for task %s",
            get_name().toLocal8Bit().data());
    }

    x = config->get_child("timer");
    if (x.is_not_null())
    {
        const string text = x->get_text();
        const time_t secs = QString::fromLocal8Bit(text.data(), text.length()).toLong();
        set_timer_interval(secs);
    }

    start_timer();
}
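Taken together, the three get_child() lookups imply a config element of roughly the following shape. This is a hypothetical example, not taken from the project: the element's own name becomes the task name, <source> and <destination> are required (an error is logged if either is missing), and <timer> is an optional interval in seconds.

<nightly-docs-zip>
    <source>/home/user/docs</source>
    <destination>/backup/docs.zip</destination>
    <timer>3600</timer>
</nightly-docs-zip>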
Example 7
File: avp.c Project: 2pac/kamailio
static int attr_destination(struct sip_msg* msg, char* p1, char* p2)
{
    avp_t* avp;
    avp_value_t val;

    if ((avp = search_avp(((fparam_t*)p1)->v.avp, &val, NULL))) {
        if (avp->flags & AVP_VAL_STR) {
            if (set_destination(msg, &val.s)) {
                LOG(L_ERR, "ERROR: avp_destination: Can't set dst uri\n");
                return -1;
            }
            /* dst_uri changed, so it makes sense to re-use the current uri
               for serial forking */
            ruri_mark_new(); /* re-use uri for serial forking */
            return 1;
        } else {
            ERR("avp_destination: AVP has numeric value\n");
            return -1;
        }
    }
    return -1;
}
Example 8
	Deal& to(const DistributedSubAccount& dsa) { set_destination(dsa); return *this; }
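This one-liner is the fluent-builder idiom: each setter returns *this so calls can be chained. A minimal sketch of the pattern with placeholder types, since Deal's real members and DistributedSubAccount are not shown in the source:

class Account { /* placeholder for DistributedSubAccount */ };

class Deal {
public:
    Deal& to(const Account& a)  { dest_ = &a;  return *this; }  // as above
    Deal& amount(double v)      { amount_ = v; return *this; }
private:
    const Account* dest_ = nullptr;
    double amount_ = 0.0;
};

// Usage: chained calls read left to right:
//   Deal deal; deal.to(account).amount(250.0);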
Example 9
int
ResState::eval( void )
{
	int want_suspend;
#if HAVE_BACKFILL
	int kill_rval; 
#endif /* HAVE_BACKFILL */

		// we may need to modify the load average in our internal
		// policy classad if we're currently running a COD job or have
		// been running 1 in the last minute.  so, give our rip a
		// chance to modify the load, if necessary, before we evaluate
		// anything.  
	rip->hackLoadForCOD();

		// also, since we might be an SMP where other slots just changed
		// their state, we also want to re-publish the shared slot
		// attributes so that other slots can see those results.
	rip->refreshSlotAttrs();

	updateActivityAverages();

	switch( r_state ) {

	case claimed_state:
		if( r_act == suspended_act && rip->isSuspendedForCOD() ) { 
				// this is the special case where we do *NOT* want to
				// evaluate any policy expressions.  so long as
				// there's an active COD job, we want to leave the
				// opportunistic claim "checkpointed to swap"
			return 0;
		}
		if( rip->inRetirement() ) { // have we been preempted?
			if( rip->retirementExpired() ) {
					// Normally, when we are in retirement, we will
					// also be in the "retiring" activity.  However,
					// it is also possible to be in the suspended
					// activity.  Just to simplify things, we have one
					// catch-all state transition here.  We may also
					// get here in some other activity (e.g. idle or
					// busy) if we just got preempted and haven't had
					// time to transition into some other state.  No
					// matter.  Whatever activity we were in, the
					// retirement time has expired, so it is time to
					// change to the preempting state.
				if( rip->isDraining() ) {
					rip->setBadputCausedByDraining();
				}
				dprintf( D_ALWAYS, "State change: claim retirement ended/expired\n" );
				// STATE TRANSITION #18
				change( preempting_state );
				return TRUE; // XXX: change TRUE
			}
		}
		want_suspend = rip->wants_suspend();
		if( (r_act==busy_act && !want_suspend) ||
			(r_act==retiring_act && !rip->preemptWasTrue() && !want_suspend) ||
			(r_act==suspended_act && !rip->preemptWasTrue()) ) {

			//Explanation for the above conditions:
			//The want_suspend check is there because behavior is
			//potentially confusing without it.  Derek says:)
			//The preemptWasTrue check is there to see if we already
			//had PREEMPT turn TRUE, in which case, we don't need
			//to keep trying to retire over and over.

			if( rip->eval_preempt() ) {
				dprintf( D_ALWAYS, "State change: PREEMPT is TRUE\n" );
				// irreversible retirement
				// STATE TRANSITION #12 or #16
				rip->preemptIsTrue();
				return rip->retire_claim();
			}
		}
		if( r_act == retiring_act ) {
			if( rip->mayUnretire() ) {
				dprintf( D_ALWAYS, "State change: unretiring because no preempting claim exists\n" );
				// STATE TRANSITION #13
				change( busy_act );
				return TRUE; // XXX: change TRUE
			}
			if( rip->retirementExpired() ) {
				dprintf( D_ALWAYS, "State change: retirement ended/expired\n" );
				change( preempting_state );
				return TRUE; // XXX: change TRUE
			}
		}
		if( (r_act == busy_act || r_act == retiring_act) && want_suspend ) {
			if( rip->eval_suspend() ) {
				// STATE TRANSITION #14 or #17
				dprintf( D_ALWAYS, "State change: SUSPEND is TRUE\n" );
				change( suspended_act );
				return TRUE; // XXX: change TRUE
			}
		}
		if( r_act == suspended_act ) {
			if( rip->eval_continue() ) {
				// STATE TRANSITION #15
				dprintf( D_ALWAYS, "State change: CONTINUE is TRUE\n" );
				if( !rip->inRetirement() ) {
					change( busy_act );
					return TRUE; // XXX: change TRUE
				}
				else {
					// STATE TRANSITION #16
					change( retiring_act );
					return TRUE; // XXX: change TRUE
				}
			}
		}
		if( (r_act == busy_act) && rip->hasPreemptingClaim() ) {
			dprintf( D_ALWAYS, "State change: retiring due to preempting claim\n" );
			// reversible retirement (e.g. if preempting claim goes away)
			// STATE TRANSITION #12
			change( retiring_act );
			return TRUE; // XXX: change TRUE
		}
		if( (r_act == idle_act) && rip->hasPreemptingClaim() ) {
			dprintf( D_ALWAYS, "State change: preempting idle claim\n" );
			change( preempting_state );
			return TRUE; // XXX: change TRUE
		}
		if( (r_act == idle_act) && (rip->eval_start() == 0) ) {
				// START evaluates to False, so return to the owner
				// state.  In this case, we don't need to worry about
				// START locally evaluating to FALSE due to undefined
				// job attributes and well-placed meta-operators, b/c
				// we're in the claimed state, so we'll have a job ad
				// to evaluate against.
			dprintf( D_ALWAYS, "State change: START is false\n" );
			change( preempting_state ); 
			return TRUE; // XXX: change TRUE
		}
		if( (r_act == idle_act) && rip->claimWorklifeExpired() ) {
			dprintf( D_ALWAYS, "State change: idle claim shutting down due to CLAIM_WORKLIFE\n" );
			change( preempting_state );
			return TRUE; // XXX: change TRUE
		}
		if( (r_act == idle_act) && rip->isDraining() ) {
			dprintf( D_ALWAYS, "State change: idle claim shutting down due to draining of this slot\n" );
			change( preempting_state );
			return TRUE;
		}
		if( (r_act == busy_act || r_act == retiring_act) && (rip->wants_pckpt()) ) {
			rip->periodic_checkpoint();
		}

#if HAVE_JOB_HOOKS
			// If we're compiled to support fetching work
			// automatically and configured to do so, check now if we
			// should try to fetch more work.
		if (r_act != suspended_act) {
			rip->tryFetchWork();
		}
#endif /* HAVE_JOB_HOOKS */

		if( rip->r_reqexp->restore() ) {
				// Our reqexp changed states, send an update
			rip->update();
		}
		break;   // case claimed_state:

	case preempting_state:
		if( r_act == vacating_act ) {
			if( rip->eval_kill() ) {
				dprintf( D_ALWAYS, "State change: KILL is TRUE\n" );
					// STATE TRANSITION #19
				change( killing_act );
				return TRUE; // XXX: change TRUE
			}
		}
		break;	// case preempting_state:

	case unclaimed_state:
		if( Resource::DYNAMIC_SLOT == rip->get_feature() ) {
#if HAVE_JOB_HOOKS
				// If we're currently fetching we can't delete
				// ourselves. If we do when the hook returns we won't
				// be around to handle the response.
			if( rip->isCurrentlyFetching() ) {
				dprintf(D_ALWAYS, "State change: Unclaimed -> Deleted delayed for outstanding work fetch\n");
				break;
			}
#endif
			change( delete_state );
			return TRUE; // XXX: change TRUE
		}

		if( rip->isDraining() ) {
			dprintf( D_ALWAYS, "State change: entering Drained state\n" );
			change( drained_state, retiring_act );
			return TRUE;
		}

		// See if we should be owner or unclaimed
		if( rip->eval_is_owner() ) {
			dprintf( D_ALWAYS, "State change: IS_OWNER is TRUE\n" );
			change( owner_state );
			return TRUE; // XXX: change TRUE
		}

			// Check to see if we should run benchmarks
		if ( ! r_act_was_benchmark ) {
			int num_started;
			resmgr->m_attr->start_benchmarks( rip, num_started );
		}

#if HAVE_JOB_HOOKS
			// If we're compiled to support fetching work
			// automatically and configured to do so, check now if we
			// should try to fetch more work.
		rip->tryFetchWork();
#endif /* HAVE_JOB_HOOKS */

#if HAVE_BACKFILL
			// check if we should go into the Backfill state.  only do
			// so if a) we've got a BackfillMgr object configured and
			// instantiated, and b) START_BACKFILL evals to TRUE
		if( resmgr->m_backfill_mgr && rip->eval_start_backfill() > 0 ) {
			dprintf( D_ALWAYS, "State change: START_BACKFILL is TRUE\n" );
			change( backfill_state, idle_act );
			return TRUE; // XXX: change TRUE
		}
#endif /* HAVE_BACKFILL */

		if( rip->r_reqexp->restore() ) {
				// Our reqexp changed states, send an update
			rip->update();
		}

		break;	

	case owner_state:
			// If the dynamic slot is allocated in the owner state
			// (e.g. because the START expression contains attributes
			// of the job ClassAd), it may never go back to the
			// Unclaimed state. So we need to delete the dynamic slot
			// in the owner state.
		if( Resource::DYNAMIC_SLOT == rip->get_feature() ) {
#if HAVE_JOB_HOOKS
				// If we're currently fetching we can't delete
				// ourselves. If we do when the hook returns we won't
				// be around to handle the response.
			if( rip->isCurrentlyFetching() ) {
				dprintf(D_ALWAYS, "State change: Owner -> Deleted delayed for outstanding work fetch\n");
				break;
			}
#endif
			change( delete_state );
			return TRUE; // XXX: change TRUE
		}

		if( rip->isDraining() ) {
			dprintf( D_ALWAYS, "State change: entering Drained state\n" );
			change( drained_state, retiring_act );
			return TRUE;
		}

		if( ! rip->eval_is_owner() ) {
			dprintf( D_ALWAYS, "State change: IS_OWNER is false\n" );
			change( unclaimed_state );
			return TRUE; // change() can delete rip
		}
#if HAVE_JOB_HOOKS
			// If we're compiled to support fetching work
			// automatically and configured to do so, check now if we
			// should try to fetch more work.  Even if we're in the
			// owner state, we can still see if the expressions allow
			// any fetched work at this point.
		rip->tryFetchWork();
#endif /* HAVE_JOB_HOOKS */

		break;	
		
	case matched_state:
			// Nothing to do here.  If we're matched, we only want to
			// leave if the match timer goes off, or if someone with
			// the right ClaimId tries to claim us.  We can't check
			// the START expression, since we don't have a job ad, and
			// we can't check IS_OWNER, since that isn't what you want
			// (IS_OWNER might be true, while the START expression
			// might allow some jobs in, and if you get matched with
			// one of those, you want to stay matched until they try
			// to claim us).  
		break;

#if HAVE_BACKFILL
	case backfill_state:
		if( ! resmgr->m_backfill_mgr ) { 
			EXCEPT( "in Backfill state but m_backfill_mgr is NULL!" );
		}
		if( r_act == killing_act ) {
				// maybe we should have a safety-valve timeout here to
				// prevent ourselves from staying in Backfill/Killing
				// for too long.  however, for now, there's nothing to
				// do here...
			return 0;
		}
			// see if we should leave the Backfill state
		kill_rval = rip->eval_evict_backfill(); 
		if( kill_rval > 0 ) {
			dprintf( D_ALWAYS, "State change: EVICT_BACKFILL is TRUE\n" );
			if( r_act == idle_act ) {
					// no sense going to killing if we're already
					// idle, go to owner immediately.
				change( owner_state );
				return TRUE; // XXX: change TRUE
			}
				// we can change into Backfill/Killing then set our
				// destination, since set_dest() won't take any
				// additional action if we're already in killing_act
			ASSERT( r_act == busy_act );
			change( backfill_state, killing_act );
			set_destination( owner_state );
			return TRUE;
		} else if( kill_rval < 0 ) {
			dprintf( D_ALWAYS, "WARNING: EVICT_BACKFILL is UNDEFINED, "
					 "staying in Backfill state\n" );
		}

#if HAVE_JOB_HOOKS
			// If we're compiled to support fetching work
			// automatically and configured to do so, check now if we
			// should try to fetch more work.
		rip->tryFetchWork();
#endif /* HAVE_JOB_HOOKS */

		if( r_act == idle_act ) {
				// if we're in Backfill/Idle, try to spawn a backfill job
			rip->start_backfill();
		}

		break;
#endif /* HAVE_BACKFILL */

	case drained_state:
		if( !rip->isDraining() ) {
			dprintf(D_ALWAYS,"State change: slot is no longer draining.\n");
			change( owner_state );
			return TRUE;
		}
		if( r_act == retiring_act ) {
			if( resmgr->drainingIsComplete( rip ) ) {
				dprintf(D_ALWAYS,"State change: draining is complete.\n");
				change( drained_state, idle_act );
				return TRUE;
			}
		}
		else if( r_act == idle_act ) {
			if( resmgr->considerResumingAfterDraining() ) {
				return TRUE;
			}
		}
		break;

	default:
		EXCEPT( "eval_state: ERROR: unknown state (%d)",
				(int)rip->state() );
	}
	return 0;
}
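The function keeps to one convention throughout: evaluate policy for the current state, and on any transition call change() and return TRUE immediately, since change() may invalidate the object (note the "change() can delete rip" comment). A stripped-down sketch of that shape, with hypothetical states and predicates:

// Minimal evaluate-then-transition loop in the style of ResState::eval().
enum class State { Claimed, Preempting, Owner };

struct Slot {
    State state = State::Claimed;
    bool  preempt = false;            // stand-in for rip->eval_preempt() etc.

    // Returns true iff a transition happened; the caller re-enters eval()
    // instead of continuing, mirroring the early `return TRUE` pattern.
    bool eval() {
        switch (state) {
        case State::Claimed:
            if (preempt) { state = State::Preempting; return true; }
            break;
        case State::Preempting:
            state = State::Owner;     // e.g. vacate finished
            return true;
        case State::Owner:
            break;                    // nothing to evaluate here
        }
        return false;                 // no transition this pass
    }
};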
Example 10
// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.  This is always true on SPARC.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
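The SPARC path is simple because the displacement is one naturally aligned instruction word: a single store replaces it atomically, so a free-running reader sees either the old or the new target, never a mix. In portable C++ terms the whole update would reduce to something like this (illustrative only, not the HotSpot implementation):

#include <atomic>
#include <cstdint>

// When the patched field fits in one aligned word, no park/patch/unpark
// dance is needed: one release store is the entire MT-safe update.
void patch_word(std::atomic<int32_t>& displacement, int32_t new_target) {
    displacement.store(new_target, std::memory_order_release);
}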