void web_connection_base::get_specific_peer_info(peer_info& p) const
{
    if (is_interesting()) p.flags |= peer_info::interesting;
    if (is_choked()) p.flags |= peer_info::choked;
    if (!is_connecting() && m_server_string.empty())
        p.flags |= peer_info::handshake;
    if (is_connecting() && !is_queued()) p.flags |= peer_info::connecting;
    if (is_queued()) p.flags |= peer_info::queued;
    p.client = m_server_string;
}
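// A hypothetical call site illustrating how the flags set above might be read
// back. print_peer_state() is an assumption added for illustration; only
// peer_info, its flags, and client come from the snippet.
#include <cstdio>

void print_peer_state(peer_info const& p)
{
    if (p.flags & peer_info::queued)
        std::printf("%s: queued\n", p.client.c_str());
    else if (p.flags & peer_info::connecting)
        std::printf("%s: connecting\n", p.client.c_str());
    else if (p.flags & peer_info::handshake)
        std::printf("%s: waiting for handshake\n", p.client.c_str());
}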
// Non-prioritized means that, if there's a line for bandwidth,
// others will cut in front of the non-prioritized peers.
// This is used by web seeds.
int bandwidth_manager::request_bandwidth(boost::intrusive_ptr<bandwidth_socket> const& peer
    , int blk, int priority
    , bandwidth_channel* chan1
    , bandwidth_channel* chan2
    , bandwidth_channel* chan3
    , bandwidth_channel* chan4
    , bandwidth_channel* chan5)
{
    INVARIANT_CHECK;
    if (m_abort) return 0;

    TORRENT_ASSERT(blk > 0);
    TORRENT_ASSERT(priority > 0);
    TORRENT_ASSERT(!is_queued(peer.get()));

    bw_request bwr(peer, blk, priority);
    int i = 0;
    if (chan1 && chan1->throttle() > 0) bwr.channel[i++] = chan1;
    if (chan2 && chan2->throttle() > 0) bwr.channel[i++] = chan2;
    if (chan3 && chan3->throttle() > 0) bwr.channel[i++] = chan3;
    if (chan4 && chan4->throttle() > 0) bwr.channel[i++] = chan4;
    if (chan5 && chan5->throttle() > 0) bwr.channel[i++] = chan5;

    if (i == 0)
    {
        // the connection is not rate limited by any of its
        // bandwidth channels, or it doesn't belong to any
        // channels. There's no point in adding it to the
        // queue; just satisfy the request immediately
        return blk;
    }
    m_queued_bytes += blk;
    m_queue.push_back(bwr);
    return 0;
}
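// A minimal sketch of the is_queued() predicate that the assert above relies
// on, assuming m_queue is a sequence of bw_request entries exposing the peer
// pointer. The member names come from this snippet; the queue_t typedef and
// the linear scan are assumptions.
bool bandwidth_manager::is_queued(bandwidth_socket const* peer) const
{
    for (queue_t::const_iterator i = m_queue.begin(), end(m_queue.end());
        i != end; ++i)
    {
        if (i->peer.get() == peer) return true;
    }
    return false;
}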
static void boost_priority(struct task_struct* t)
{
    unsigned long flags;
    psnedf_domain_t* pedf = task_pedf(t);
    lt_t now;

    raw_readyq_lock_irqsave(&pedf->slock, flags);
    now = litmus_clock();

    TRACE_TASK(t, "priority boosted at %llu\n", now);

    tsk_rt(t)->priority_boosted = 1;
    tsk_rt(t)->boost_start_time = now;

    if (pedf->scheduled != t) {
        /* holder may be queued: first stop queue changes */
        raw_spin_lock(&pedf->domain.release_lock);
        if (is_queued(t) &&
            /* If it is queued, then we need to re-order. */
            bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
            /* If we bubbled to the top, then we need to check for preemptions. */
            edf_preemption_needed(&pedf->domain, pedf->scheduled))
            preempt(pedf);
        raw_spin_unlock(&pedf->domain.release_lock);
    } /* else: nothing to do since the job is not queued while scheduled */

    raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
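/* A plausible definition of the is_queued() test used above, assuming the
 * convention visible in this snippet that a task's heap_node is linked into
 * the ready heap exactly when the task is queued. Sketch only;
 * bheap_node_in_heap() is an assumption and the real definition may be a
 * macro. */
static inline int task_is_queued(struct task_struct *t)
{
    return bheap_node_in_heap(tsk_rt(t)->heap_node);
}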
// Non-prioritized means that, if there's a line for bandwidth,
// others will cut in front of the non-prioritized peers.
// This is used by web seeds.
void request_bandwidth(intrusive_ptr<PeerConnection> const& peer
    , int blk, int priority
    , bandwidth_channel* chan1 = 0
    , bandwidth_channel* chan2 = 0
    , bandwidth_channel* chan3 = 0
    , bandwidth_channel* chan4 = 0
    , bandwidth_channel* chan5 = 0)
{
    INVARIANT_CHECK;
    if (m_abort) return;

    TORRENT_ASSERT(blk > 0);
    TORRENT_ASSERT(priority > 0);
    TORRENT_ASSERT(!is_queued(peer.get()));

    bw_request<PeerConnection> bwr(peer, blk, priority);
    int i = 0;
    if (chan1 && chan1->throttle() > 0) bwr.channel[i++] = chan1;
    if (chan2 && chan2->throttle() > 0) bwr.channel[i++] = chan2;
    if (chan3 && chan3->throttle() > 0) bwr.channel[i++] = chan3;
    if (chan4 && chan4->throttle() > 0) bwr.channel[i++] = chan4;
    if (chan5 && chan5->throttle() > 0) bwr.channel[i++] = chan5;

    if (i == 0)
    {
        // the connection is not rate limited by any of its
        // bandwidth channels, or it doesn't belong to any
        // channels. There's no point in adding it to the
        // queue; just satisfy the request immediately
        bwr.peer->assign_bandwidth(m_channel, blk);
        return;
    }
    m_queued_bytes += blk;
    m_queue.push_back(bwr);
}
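// A plausible shape for the assign_bandwidth() callback invoked above,
// assuming the peer credits the granted bytes to a per-channel quota and then
// resumes the corresponding I/O. m_quota, upload_channel, setup_send() and
// setup_receive() are assumptions for illustration, not confirmed API.
void peer_connection::assign_bandwidth(int channel, int amount)
{
    TORRENT_ASSERT(amount > 0);
    m_quota[channel] += amount;   // credit the granted bytes
    if (channel == upload_channel)
        setup_send();             // resume writing with the new quota
    else
        setup_receive();          // resume reading with the new quota
}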
void operator()(VD v)
{
    // Can be called blindly for all verts; only those with a reachable path cost are queued.
    // put(locp, v, 0); // not necessary: the property factory (even new int[N]) will always default-init
    if (!is_queued(v) && get(mu, v) != PT::unreachable())
        add_unsorted(v);
}
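// Hedged sketch of how is_queued(v) might be realized here, assuming the locp
// property map stores a vertex's queue position and that the default-initialized
// value 0 (noted in the comment above) means "not queued"; purely illustrative.
bool is_queued(VD v) const
{
    return get(locp, v) != 0;
}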
static void psnedf_task_block(struct task_struct *t)
{
    /* only running tasks can block, thus t is in no queue */
    TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);

    BUG_ON(!is_realtime(t));
    BUG_ON(is_queued(t));
}
static void enqueue(td_job_queue *queue, td_node *node)
{
    assert(is_root(node) || !is_queued(node));
    node->job.flags |= TD_JOBF_QUEUED;

    assert((queue->tail - queue->head) < queue->array_size);

    if (td_debug_check(queue->engine, TD_DEBUG_QUEUE))
        printf("enqueueing %s\n", node->annotation);

    queue->array[queue->tail % queue->array_size] = node;
    ++queue->tail;
}
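/* Sketch of the flag-based is_queued() test that enqueue() pairs with,
 * assuming TD_JOBF_QUEUED is the queue-membership bit set above; the real
 * definition may be a macro rather than a function. */
static int is_queued_sketch(const td_node *node)
{
    return 0 != (node->job.flags & TD_JOBF_QUEUED);
}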
static void psnedf_task_wake_up(struct task_struct *task)
{
    unsigned long flags;
    psnedf_domain_t* pedf = task_pedf(task);
    rt_domain_t* edf = task_edf(task);
    lt_t now;

    TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

    raw_readyq_lock_irqsave(&pedf->slock, flags);

    set_task_state(task, TASK_RUNNING);

    BUG_ON(is_queued(task));
    now = litmus_clock();
    if (is_sporadic(task) && is_tardy(task, now)
#ifdef CONFIG_LITMUS_LOCKING
        /* We need to take suspensions because of semaphores into
         * account! If a job resumes after being suspended due to acquiring
         * a semaphore, it should never be treated as a new job release. */
        && !is_priority_boosted(task)
#endif
        ) {
        /* new sporadic release */
        release_at(task, now);
        sched_trace_task_release(task);
    }

    budget_state_machine(task, on_wakeup);

    /* Only add to ready queue if it is not the currently-scheduled
     * task. This could be the case if a task was woken up concurrently
     * on a remote CPU before the executing CPU got around to actually
     * de-scheduling the task, i.e., wake_up() raced with schedule()
     * and won. */
    if (pedf->scheduled != task) {
        requeue(task, edf);
        psnedf_preempt_check(pedf);
    }

    raw_readyq_unlock_irqrestore(&pedf->slock, flags);
    TRACE_TASK(task, "wake up done\n");
}
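/* A hedged sketch of the requeue() helper called above, assuming the usual
 * split between jobs that are already released (go on the ready queue) and
 * jobs that must wait for their release time. is_released(), __add_ready()
 * and add_release() are assumptions about this codebase. */
static void requeue_sketch(struct task_struct *t, rt_domain_t *edf)
{
    if (is_released(t, litmus_clock()))
        __add_ready(edf, t);  /* eligible now: into the ready heap */
    else
        add_release(edf, t);  /* not yet released: park until release time */
}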
static void psnedf_task_exit(struct task_struct *t)
{
    unsigned long flags;
    psnedf_domain_t* pedf = task_pedf(t);
    rt_domain_t* edf;

    raw_readyq_lock_irqsave(&pedf->slock, flags);

    /* disable budget enforcement */
    budget_state_machine(t, on_exit);

    if (is_queued(t)) {
        /* dequeue */
        edf = task_edf(t);
        remove(edf, t);
    }
    if (pedf->scheduled == t)
        pedf->scheduled = NULL;

    TRACE_TASK(t, "RIP, now reschedule\n");

    preempt(pedf);
    raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
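/* Minimal sketch of the preempt() call above, assuming it merely asks the
 * domain's CPU to reschedule if the currently scheduled job is preemptable;
 * preempt_if_preemptable() and the pedf->cpu field are assumptions. */
static void preempt_sketch(psnedf_domain_t *pedf)
{
    preempt_if_preemptable(pedf->scheduled, pedf->cpu);
}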
// Non-prioritized means that, if there's a line for bandwidth,
// others will cut in front of the non-prioritized peers.
// This is used by web seeds.
int bandwidth_manager::request_bandwidth(boost::shared_ptr<bandwidth_socket> const& peer
    , int blk, int priority, bandwidth_channel** chan, int num_channels)
{
    INVARIANT_CHECK;
    if (m_abort) return 0;

    TORRENT_ASSERT(blk > 0);
    TORRENT_ASSERT(priority > 0);

    // if this assert is hit, the peer is requesting more bandwidth before
    // being assigned bandwidth for an already outstanding request
    TORRENT_ASSERT(!is_queued(peer.get()));

    if (num_channels == 0)
    {
        // the connection is not rate limited by any of its
        // bandwidth channels, or it doesn't belong to any
        // channels. There's no point in adding it to the
        // queue; just satisfy the request immediately
        return blk;
    }

    int k = 0;
    bw_request bwr(peer, blk, priority);
    for (int i = 0; i < num_channels; ++i)
    {
        if (chan[i]->need_queueing(blk))
            bwr.channel[k++] = chan[i];
    }

    if (k == 0) return blk;

    m_queued_bytes += blk;
    m_queue.push_back(bwr);
    return 0;
}
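// One way need_queueing() could be implemented for the loop above: the
// channel grants quota immediately while it has some left and asks for
// queueing otherwise. m_quota_left is an assumed member; sketch only.
bool bandwidth_channel::need_queueing(int amount)
{
    if (m_quota_left - amount < 0) return true;  // not enough quota: queue the request
    m_quota_left -= amount;                      // grant immediately
    return false;
}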
static void advance_job(td_job_queue *queue, td_node *node, int job_id)
{
    td_jobstate state;
    while ((state = node->job.state) < TD_JOB_COMPLETED)
    {
        switch (state)
        {
        case TD_JOB_INITIAL:
            if (node->job.block_count > 0)
            {
                /* enqueue any blocking jobs and transition to the blocked state */
                int i, count, bc = 0;
                td_node **deps = node->deps;
                for (i = 0, count = node->dep_count; i < count; ++i)
                {
                    td_node *dep = deps[i];
                    if (!is_completed(dep))
                    {
                        ++bc;
                        if (!is_queued(dep) && dep->job.state < TD_JOB_BLOCKED)
                            enqueue(queue, dep);
                    }
                }
                assert(bc == node->job.block_count);
                transition_job(queue, node, TD_JOB_BLOCKED);
                pthread_cond_broadcast(&queue->work_avail);
                return;
            }
            else
            {
                /* nothing is blocking this job, so scan implicit deps immediately */
                transition_job(queue, node, TD_JOB_SCANNING);
            }
            break;

        case TD_JOB_BLOCKED:
            assert(0 == node->job.block_count);
            if (0 == node->job.failed_deps)
                transition_job(queue, node, TD_JOB_SCANNING);
            else
                transition_job(queue, node, TD_JOB_FAILED);
            break;

        case TD_JOB_SCANNING:
            if (0 == scan_implicit_deps(queue, node))
            {
                update_input_signature(queue, node);
                if (is_up_to_date(queue, node))
                    transition_job(queue, node, TD_JOB_UPTODATE);
                else
                    transition_job(queue, node, TD_JOB_RUNNING);
            }
            else
            {
                /* implicit dependency scanning failed */
                transition_job(queue, node, TD_JOB_FAILED);
            }
            break;

        case TD_JOB_RUNNING:
            if (0 != run_job(queue, node, job_id))
                transition_job(queue, node, TD_JOB_FAILED);
            else
                transition_job(queue, node, TD_JOB_COMPLETED);
            break;

        default:
            assert(0);
            td_croak("can't get here");
            break;
        }
    }

    if (is_completed(node))
    {
        int qcount = 0;
        td_job_chain *chain = node->job.pending_jobs;

        if (td_debug_check(queue->engine, TD_DEBUG_QUEUE))
            printf("%s completed - enqueueing blocked jobs\n", node->annotation);

        /* unblock all jobs that are waiting for this job and enqueue them */
        while (chain)
        {
            td_node *n = chain->node;

            if (is_failed(node))
                n->job.failed_deps++;

            /* nodes blocked on this node can't be completed yet */
            assert(!is_completed(n));

            if (0 == --n->job.block_count)
            {
                if (!is_queued(n))
                    enqueue(queue, n);
                ++qcount;
            }

            chain = chain->next;
        }

        if (1 < qcount)
            pthread_cond_broadcast(&queue->work_avail);
        else if (1 == qcount)
            pthread_cond_signal(&queue->work_avail);
    }
}
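/* Hypothetical sketch of the transition_job() helper used throughout
 * advance_job(): record the new state, optionally tracing it. The real
 * helper may log differently or do extra bookkeeping. */
static void transition_job_sketch(td_job_queue *queue, td_node *node, td_jobstate new_state)
{
    if (td_debug_check(queue->engine, TD_DEBUG_QUEUE))
        printf("%s: state %d -> %d\n", node->annotation,
            (int) node->job.state, (int) new_state);
    node->job.state = new_state;
}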
void safe_queue(VD v)
{
    TUHG_SHOWQ(2, "safe_queue", v);
    if (!is_queued(v)) add_unsorted(v);
}