/* Draws the next random double from the slave's shared RNG.
 * The slave lock serializes access so concurrent workers get distinct draws. */
gdouble slave_nextRandomDouble(Slave* slave) {
    MAGIC_ASSERT(slave);

    _slave_lock(slave);
    gdouble value = random_nextDouble(slave->random);
    _slave_unlock(slave);

    return value;
}
/* Draws the next random double from the engine's shared RNG.
 * The engine lock serializes access across threads. */
gdouble engine_nextRandomDouble(Engine* engine) {
    MAGIC_ASSERT(engine);

    _engine_lock(engine);
    gdouble value = random_nextDouble(engine->random);
    _engine_unlock(engine);

    return value;
}
/*
 * Native implementation of Math.random(): samples the runtime-wide RNG
 * under the runtime lock, then boxes the result into *rval.
 */
static JSBool math_random(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
    JSRuntime *rt = cx->runtime;
    jsdouble sample;

    JS_LOCK_RUNTIME(rt);
    random_init(rt);    /* presumably seeds the RNG state on first use -- TODO confirm */
    sample = random_nextDouble(rt);
    JS_UNLOCK_RUNTIME(rt);

    return js_NewNumberValue(cx, sample, rval);
}
/*
 * Allocation-metadata hook: decides whether to attach a captured JS stack to a
 * new allocation, based on the compartment's sampling probability. Returns
 * false only on failure to capture the current stack; a "skipped" sample still
 * returns true with *pmetadata untouched.
 */
bool SavedStacksMetadataCallback(JSContext *cx, JSObject **pmetadata)
{
    SavedStacks &stacks = cx->compartment()->savedStacks();

    /* Still inside a precomputed skip window: consume one skip and bail. */
    if (stacks.allocationSkipCount > 0) {
        stacks.allocationSkipCount--;
        return true;
    }

    stacks.chooseSamplingProbability(cx);

    /* Probability 0 means sampling is disabled entirely. */
    if (stacks.allocationSamplingProbability == 0.0)
        return true;

    // If the sampling probability is set to 1.0, we are always taking a sample
    // and can therefore leave allocationSkipCount at 0.
    if (stacks.allocationSamplingProbability != 1.0) {
        // Rather than generating a random number on every allocation to decide
        // if we want to sample that particular allocation (which would be
        // expensive), we calculate the number of allocations to skip before
        // taking the next sample.
        //
        // P = the probability we sample any given event.
        //
        // ~P = 1-P, the probability we don't sample a given event.
        //
        // (~P)^n = the probability that we skip at least the next n events.
        //
        // let X = random between 0 and 1.
        //
        // floor(log base ~P of X) = n, aka the number of events we should skip
        // until we take the next sample. Any value for X less than (~P)^n
        // yields a skip count greater than n, so the likelihood of a skip count
        // greater than n is (~P)^n, as required.
        double notSamplingProb = 1.0 - stacks.allocationSamplingProbability;
        stacks.allocationSkipCount = std::floor(std::log(random_nextDouble(&stacks.rngState)) /
                                                std::log(notSamplingProb));
    }

    /* This allocation is sampled: capture the current stack as its metadata. */
    RootedSavedFrame frame(cx);
    if (!stacks.saveCurrentStack(cx, &frame))
        return false;
    *pmetadata = frame;

    return Debugger::onLogAllocationSite(cx, frame, PRMJ_Now());
}
/* Chooses a random free port (returned in network byte order) for the given
 * interface and descriptor type. Returns 0 when the interface is unknown or
 * has no free ports available. */
static in_port_t _host_getRandomFreePort(Host* host, in_addr_t interfaceIP, DescriptorType type) {
    MAGIC_ASSERT(host);

    in_port_t chosenNetworkPort = 0;
    NetworkInterface* interface = host_lookupInterface(host, interfaceIP);

    if(interface && networkinterface_hasFreePorts(interface)) {
        /* keep sampling candidate ports until we hit an available one;
         * hasFreePorts above guarantees this search can terminate */
        for(;;) {
            gdouble fraction = random_nextDouble(host->random);
            in_port_t candidateHostPort =
                (in_port_t) (fraction * (UINT16_MAX - MIN_RANDOM_PORT)) + MIN_RANDOM_PORT;
            utility_assert(candidateHostPort >= MIN_RANDOM_PORT);

            chosenNetworkPort = htons(candidateHostPort);
            if(_host_isInterfaceAvailable(host, interfaceIP, type, chosenNetworkPort)) {
                break;
            }
        }
    }

    return chosenNetworkPort;
}
/* Routes a packet from source to destination host: applies the topology's
 * link reliability to probabilistically drop it, and otherwise schedules its
 * arrival after the topology's latency delay. */
void worker_schedulePacket(Packet* packet) {
    /* get our thread-private worker */
    Worker* worker = _worker_getPrivate();

    /* the simulation is over, don't bother */
    if(slave_isKilled(worker->slave)) {
        return;
    }

    in_addr_t srcIP = packet_getSourceIP(packet);
    in_addr_t dstIP = packet_getDestinationIP(packet);
    Address* srcAddress = dns_resolveIPToAddress(worker_getDNS(), (guint32) srcIP);
    Address* dstAddress = dns_resolveIPToAddress(worker_getDNS(), (guint32) dstIP);

    if(!srcAddress || !dstAddress) {
        error("unable to schedule packet because of null addresses");
        return;
    }

    /* check if network reliability forces us to 'drop' the packet */
    gdouble reliability = topology_getReliability(worker_getTopology(), srcAddress, dstAddress);
    gdouble chance = random_nextDouble(host_getRandom(worker_getCurrentHost()));

    /* don't drop control packets with length 0, otherwise congestion
     * control has problems responding to packet loss */
    if(chance > reliability && packet_getPayloadLength(packet) != 0) {
        packet_addDeliveryStatus(packet, PDS_INET_DROPPED);
        return;
    }

    /* the sender's packet will make it through, find latency */
    gdouble latency = topology_getLatency(worker_getTopology(), srcAddress, dstAddress);
    SimulationTime delay = (SimulationTime) ceil(latency * SIMTIME_ONE_MILLISECOND);

    PacketArrivedEvent* event = packetarrived_new(packet);
    worker_scheduleEvent((Event*)event, delay, (GQuark)address_getID(dstAddress));
    packet_addDeliveryStatus(packet, PDS_INET_SENT);
}
/* Randomly permutes the elements of queue in place using a Fisher-Yates
 * shuffle driven by the scheduler's RNG. A NULL queue is a no-op. */
static void _scheduler_shuffleQueue(Scheduler* scheduler, GQueue* queue) {
    if(queue == NULL) {
        return;
    }

    guint length = g_queue_get_length(queue);

    /* BUGFIX: bail out for 0 or 1 elements. With length == 0 the original
     * loop bound (length-1) underflowed guint to UINT_MAX and iterated over a
     * zero-length VLA (undefined behavior); a 1-element queue is already
     * trivially shuffled. */
    if(length < 2) {
        return;
    }

    /* convert queue to array */
    gpointer array[length];
    for(guint i = 0; i < length; i++) {
        array[i] = g_queue_pop_head(queue);
    }

    /* we now should have moved all elements from the queue to the array */
    utility_assert(g_queue_is_empty(queue));

    /* shuffle array - Fisher-Yates shuffle: swap each slot i with a
     * uniformly chosen slot in [i, length) */
    for(guint i = 0; i < length-1; i++) {
        gdouble randomFraction = random_nextDouble(scheduler->random);
        gdouble maxRange = (gdouble) length-i;
        guint j = (guint)floor(randomFraction * maxRange);
        /* handle edge case if we got 1.0 as a double */
        if(j == length-i) {
            j--;
        }
        gpointer temp = array[i];
        array[i] = array[i+j];
        array[i+j] = temp;
    }

    /* reload the queue with the newly shuffled ordering */
    for(guint i = 0; i < length; i++) {
        g_queue_push_tail(queue, array[i]);
    }
}
/* Trace-native fast path for Math.random(): forwards to the context's RNG. */
static jsdouble FASTCALL math_random_tn(JSContext *cx)
{
    jsdouble sample = random_nextDouble(cx);
    return sample;
}
/* Native implementation of Math.random(): draws the next pseudo-random
 * double and boxes it into the rooted return value. */
static JSBool math_random(JSContext *cx, uintN argc, jsval *vp)
{
    jsdouble sample = random_nextDouble(cx);
    return js_NewNumberInRootedValue(cx, sample, vp);
}
/* Trace-native fast path for Math.random(): samples from the per-thread
 * RNG state attached to the context. */
static jsdouble FASTCALL math_random_tn(JSContext *cx)
{
    jsdouble sample = random_nextDouble(JS_THREAD_DATA(cx));
    return sample;
}