Example #1
void JitteredPacedFlow::send_pending_data() {
  if (received_bytes < size) {
    //std::cout << "Sending Pending Data" << std::endl;
    if (next_seq_no + mss > size) {
      next_seq_no = last_unacked_seq;
    }
    uint32_t seqn = next_seq_no;
    if (seqn + mss > size) {
      return;
    }
    next_seq_no = seqn + mss;
    if (received.count(seqn) == 0) {
      //std::cout << get_current_time() << " Enqueing " << seqn << "\n";
      uint32_t priority = get_priority(seqn);
      Packet *p = new Packet(get_current_time(), this, seqn,
                             priority, mss + hdr_size,
                             src, dst);

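      // Pace the packet: 'wait' is the target inter-packet gap at the paced
      // rate, and the enqueue time is jittered uniformly over the slack
      // (wait - td), presumably to avoid synchronized bursts across flows.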
      double td = src->queue->get_transmission_delay(p->size);
      double wait = td / rate;
      double jitter = (wait - td) * rand() / RAND_MAX;
      add_to_event_queue(new PacketQueuingEvent(get_current_time() + jitter,
        p, src->queue));
      add_to_event_queue(new FlowProcessingEvent(get_current_time() + wait,
        this));
    } else {
      send_pending_data();
    }
  }
}
Example #2
void FastpassFlow::send_data_pkt() {
    this->sender_last_pkt_sent = next_pkt_to_send();
    Packet *p = new Packet(get_current_time(), this, this->sender_last_pkt_sent * mss, 1, mss + hdr_size, src, dst);
    if(debug_flow(this->id))
        std::cout << get_current_time() << " flow " << this->id << " send data " << this->sender_last_pkt_sent << " \n";
    total_pkt_sent++;
    next_seq_no += mss;
    if(sender_remaining_num_pkts > 0) sender_remaining_num_pkts--;
    add_to_event_queue(new PacketQueuingEvent(get_current_time(), p, src->queue));
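    // Once no packets remain to send, arm a Fastpass timeout one epoch ahead.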
    if(this->sender_remaining_num_pkts == 0)
        add_to_event_queue(new FastpassTimeoutEvent(get_current_time() + FASTPASS_EPOCH_TIME, this));
}
Example #3
int Sys_Input_GetKeyEvent(struct input_data *input, keynum_t *keynum, qboolean *down)
{
	pthread_mutex_lock(&input->key_mutex);
	
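	/* Synthesize one repeat event for each repeat interval that has elapsed
	   while the key has been held */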
	if (input->repeatkey)
	{
		long long curtime = Sys_IntTime();

		while (input->nextrepeattime <= curtime)
		{
			add_to_event_queue(input, input->repeatkey, true);
			input->nextrepeattime += input->key_repeat_delay;
		}
	}
	
	pthread_mutex_unlock(&input->key_mutex);

	if (input->buttoneventhead == input->buttoneventtail)
	{
		return 0;
	}

	*keynum = input->buttonevents[input->buttoneventtail].key;
	*down = input->buttonevents[input->buttoneventtail].down;

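	/* sequencepointkthx() presumably forces a sequence point/compiler barrier
	   so the event fields are read before the tail index below is advanced */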
	sequencepointkthx();

	input->buttoneventtail = (input->buttoneventtail + 1) % NUMBUTTONEVENTS;

	return 1;
}
Example #4
void MagicFlow::receive(Packet *p) {
    if (this->finished) {
        return;
    }
    if (p->type == NORMAL_PACKET) {
        received_count++;
        received_bytes += (p->size - hdr_size);
        //only send one ack per bdp
//        if (received_count == size_in_pkt){
//            assert( this == ((QuickSchedulingHost*)(this->dst))->flow_receiving );
//            ((QuickSchedulingHost*)(this->dst))->flow_receiving = NULL;
//        }

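        // Start ACKing once the received count reaches the receiver's goal.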
        if (received_count >= goal) {
            send_ack();
        }
    }
    else if (p->type == ACK_PACKET) {
        //assert(this == ((QuickSchedulingHost*)(this->src))->flow_sending );
        this->finished = true;
        //((QuickSchedulingHost*)(this->src))->flow_sending = NULL;
        //((QuickSchedulingHost*)(this->src))->schedule();
        add_to_event_queue(new FlowFinishedEvent(get_current_time(), this));
    }
    delete p;
}
Example #5
Packet* MagicFlow::send(uint32_t seq) {
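    // Flows of at most 8 packets keep the top priority (1); larger flows are demoted to 2.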
    uint32_t priority = 1;
    if(this->size_in_pkt > 8)
        priority = 2;
    //priority = this->remaining_pkt();
    Packet *p = new Packet(get_current_time(), this, seq, priority, mss + hdr_size, src, dst);
    total_pkt_sent++;
    add_to_event_queue(new PacketQueuingEvent(get_current_time(), p, src->queue));
    return p;
}
Example #6
void MagicFlow::start_flow() {
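    // On the first call, defer the real arrival by 1.6 us ("infl" presumably
    // models one in-flight delay) before the base class takes over.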
    if (!this->added_infl_time) {
        add_to_event_queue(new FlowArrivalEvent(get_current_time() + 1.6e-6, this));
        this->added_infl_time = true;
        return;
    }
    else {
        FountainFlowWithSchedulingHost::start_flow();
    }
}
Example #7
void RTSCTSHost::send() {
    //look at RTSes first
    if (this->active_CTS == NULL && !this->pending_RTS.empty()) {
        //pick a new CTS to send
        RTSCTS* rts = this->pending_RTS.top();
        this->pending_RTS.pop();
        //send a CTS for this flow
        Packet *cts = new RTSCTS(false, get_current_time(), rts->flow, rts->size, this, rts->src);
        add_to_event_queue(new PacketQueuingEvent(get_current_time(), cts, this->queue));
        
        this->active_CTS = (RTSCTS*) cts;

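        // Reschedule this host's send() for when the CTS finishes transmitting,
        // unless a future processing event is already pending.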
        if (this->host_proc_event == NULL || this->host_proc_event->time < get_current_time()) {
            double td = queue->get_transmission_delay(cts->size);
            this->host_proc_event = new HostProcessingEvent(get_current_time() + td, this);
            add_to_event_queue(this->host_proc_event);
        }
    }
    else {
        SchedulingHost::send();
    }
}
Example #8
void PipelineSchedulingHost::handle_rts(RTS* rts, FountainFlowWithPipelineSchedulingHost* f)
{
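    // An offer that has been outstanding for more than 9 us is treated as
    // stale: release the receiver lock so new RTSs can be accepted.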
    if(this->receiver_schedule_state == 1 && get_current_time() >= this->receiver_offer_time + 0.000009)
    {
        this->receiver_offer_unlock();
    }

    if(this->receiver_schedule_state == 0 && get_current_time() + rts->delay >= this->receiver_busy_until)
    {
        if(debug_flow(rts->flow->id)) std::cout << get_current_time() << " host " << this->id << " accepting rts of flow " << rts->flow->id << " from " << rts->src->id << "\n";

        OfferPkt* offer = new OfferPkt(f, rts->dst, rts->src, true, rts->iter);
        add_to_event_queue(new PacketQueuingEvent(get_current_time(), offer, offer->src->queue));
        this->receiver_offer_lock(f);
    }
    else
    {
        if(debug_flow(rts->flow->id)) std::cout << get_current_time() << " host " << this->id << " rejecting rts of flow " << rts->flow->id << " from " << rts->src->id << "\n";
        OfferPkt* offer = new OfferPkt(f, rts->dst, rts->src, false, rts->iter);
        add_to_event_queue(new PacketQueuingEvent(get_current_time(), offer, offer->src->queue));
    }
}
Example #9
void MagicFlow::send_pending_data() {
    Packet *p = this->send(next_seq_no);
    next_seq_no += mss;
    this->send_count++;
    assert(this->remaining_pkt_this_round > 0);
    this->remaining_pkt_this_round--;

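    // (Re)arm the host processing event for when this packet finishes
    // transmitting, cancelling any pending timeout-only event first.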
    if(((SchedulingHost*) src)->host_proc_event == NULL || ((MagicHost*) src)->is_host_proc_event_a_timeout){
        if(((SchedulingHost*) src)->host_proc_event)
            ((SchedulingHost*) src)->host_proc_event->cancelled = true;
        double td = src->queue->get_transmission_delay(p->size);
        ((SchedulingHost*) src)->host_proc_event = new HostProcessingEvent(get_current_time() + td, (SchedulingHost*) src);
        add_to_event_queue(((SchedulingHost*) src)->host_proc_event);
    }
}
Example #10
void FastpassFlow::receive(Packet *p) {
    if (p->type == FASTPASS_RTS) {
        if(debug_flow(this->id))
            std::cout << get_current_time() << " flow " << this->id << " received rts\n";
        ((PFabricTopology*) topology)->arbiter->receive_rts((FastpassRTS*) p);
    } else if (p->type == FASTPASS_SCHEDULE) {
        if(debug_flow(this->id))
            std::cout << get_current_time() << " flow " << this->id << " received schedule\n";
        ((FastpassHost*) this->src)->receive_schedule_pkt((FastpassSchedulePkt*) p);
    } else if (p->type == NORMAL_PACKET) {
        if(debug_flow(this->id))
            std::cout << get_current_time() << " flow " << this->id << " received data seq" << p->seq_no << "\n";
        this->send_ack_pkt(p->seq_no);
        this->received_bytes += mss;
        if(receiver_received && receiver_received->count(p->seq_no) == 0)
        {
            receiver_received->insert(p->seq_no);
            if(num_outstanding_packets >= ((p->size - hdr_size) / (mss)))
                num_outstanding_packets -= ((p->size - hdr_size) / (mss));
            else
                num_outstanding_packets = 0;
        }

    } else if (p->type == ACK_PACKET) {
        if(debug_flow(this->id))
            std::cout << get_current_time() << " flow " << this->id << " received ack seq" << p->seq_no << "\n";
        int acked_pkt = p->seq_no/mss;
        if(sender_acked && sender_acked->count(acked_pkt) == 0)
        {
            sender_acked->insert(acked_pkt);
            sender_acked_count++;
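            // Advance the cumulative-ACK frontier past every contiguously ACKed packet.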
            while(sender_acked->count(sender_acked_until) > 0){
                sender_acked_until++;
            }
        }
        if(!this->sender_finished && sender_acked_count == this->size_in_pkt){
            this->sender_finished = true;
            this->update_remaining_size();
            add_to_event_queue(new FlowFinishedEvent(get_current_time(), this));
            this->finish_flow();
        }
    } else {
        assert(false);
    }
    delete p;
}
Example #11
void PipelineSchedulingHost::send_RTS(){
    std::queue<FountainFlowWithPipelineSchedulingHost*> rts_sent;
    sender_rts_sent_count = 0;
    FountainFlowWithPipelineSchedulingHost* f = NULL;

//    if(this->current_sending_flow && this->current_sending_flow->total_pkt_sent < this->current_sending_flow->size_in_pkt - params.reauth_limit){
//        f = this->current_sending_flow;
//        RTS* rts = new RTS(f, f->src, f->dst, RECEIVER_ADVANCE, this->sender_iteration);
//        if(debug_flow(rts->flow->id) || debug_host(this->id))
//            std::cout << get_current_time() << " send rts for flow id:" << rts->flow->id
//            << " src:" << rts->src->id << " dst:" << rts->dst->id << " iter:" << this->sender_iteration << "\n";
//        assert(f->src->queue->limit_bytes - f->src->queue->bytes_in_queue >= rts->size);
//        add_to_event_queue(new PacketQueuingEvent(get_current_time(), rts, rts->src->queue));
//        sender_rts_sent_count++;
//    }

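    // Offer an RTS to up to RTS_BATCH_SIZE unscheduled flows, popping them while
    // iterating and pushing them back afterwards to preserve the priority queue.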
    for(int i = 0; i < RTS_BATCH_SIZE && !sending_flows.empty(); i++)
    {
        f = (FountainFlowWithPipelineSchedulingHost*)sending_flows.top();
        sending_flows.pop();
        if(f->finished)
            continue;
        if(!f->scheduled){
            RTS* rts = new RTS(f, f->src, f->dst, RECEIVER_ADVANCE, this->sender_iteration);
            f->rts_send_count++;
            if(debug_flow(rts->flow->id) || debug_host(this->id))
                std::cout << get_current_time() << " send rts for flow id:" << rts->flow->id
                << " src:" << rts->src->id << " dst:" << rts->dst->id << " iter:" << this->sender_iteration << "\n";
            assert(f->src->queue->limit_bytes - f->src->queue->bytes_in_queue >= rts->size);
            add_to_event_queue(new PacketQueuingEvent(get_current_time(), rts, rts->src->queue));
            rts_sent.push(f); //TODO:fix this
            sender_rts_sent_count++;
        }
    }
    while(!rts_sent.empty())
    {
        FountainFlowWithPipelineSchedulingHost* f = rts_sent.front();
        rts_sent.pop();
        sending_flows.push(f);
    }
}
Example #12
void PacedFlow::receive_ack(uint32_t ack, std::vector<uint32_t> sack_list) {
  this->scoreboard_sack_bytes = sack_list.size() * mss;
  // On a timeout, next_seq_no is reset to last_unacked_seq, so the incoming
  // ack can be greater than next_seq_no; catch next_seq_no up if so.
  if (next_seq_no < ack) {
    next_seq_no = ack;
  }

  // New ack!
  if (ack > last_unacked_seq) {
    // Update the last unacked seq
    last_unacked_seq = ack;
  }

  if (ack == size && !finished) {
    finished = true;
    finish_time = get_current_time();
    flow_completion_time = finish_time - start_time;
    FlowFinishedEvent *ev = new FlowFinishedEvent(get_current_time(), this);
    add_to_event_queue(ev);
  }
}
Example #13
void SchedulingHost::send() {
    if (this->sending_flows.empty()) {
        return;
    }
    
    if (!this->queue->busy) {
        while (!this->sending_flows.empty() && (this->sending_flows.top())->finished) {
            this->sending_flows.pop();    
        }
        if (this->sending_flows.empty()) {
            return;
        }
        (this->sending_flows.top())->send_pending_data();
    }
    else {
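        // The queue is busy: retry once everything currently queued has drained.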
        QueueProcessingEvent *qpe = this->queue->queue_proc_event;
        uint32_t queue_size = this->queue->bytes_in_queue;
        double td = this->queue->get_transmission_delay(queue_size);
        this->host_proc_event = new HostProcessingEvent(qpe->time + td, this);
        add_to_event_queue(this->host_proc_event);
    }
}
Example #14
void PipelineSchedulingHost::send() {
    double host_proc_evt_time = 10000;
    if(debug_host(this->id))
        std::cout << get_current_time() << " PipelineSchedulingHost::send() at host" << this->id << "\n";
    //move a flow to sending_redundency once all of its data packets have been sent

    while(this->current_sending_flow && this->current_sending_flow->finished){
        this->current_sending_flow = NULL;

        if(next_sending_flow){
            if(debug_host(this->id))
                std::cout << get_current_time() << " exist next_sending_flow, setting cur_sending_flow to " << this->next_sending_flow->id << "\n";

            this->current_sending_flow = next_sending_flow;
            this->sender_busy_until = get_current_time() + 0.0000012 * this->current_sending_flow->remaining_schd_pkt;
            next_sending_flow = NULL;
        }
    }

    if(this->current_sending_flow && ((FountainFlowWithPipelineSchedulingHost*)(this->current_sending_flow))->remaining_schd_pkt == 0){
        if(((FountainFlowWithPipelineSchedulingHost*)(this->current_sending_flow))->total_pkt_sent >= this->current_sending_flow->size_in_pkt){
            ((FountainFlowWithPipelineSchedulingHost*)(this->current_sending_flow))->ack_timeout = get_current_time() + 0.0000095;
            this->sending_redundency.push((FountainFlowWithPipelineSchedulingHost*)(this->current_sending_flow));
            if(debug_host(this->id))
                std::cout << get_current_time() << " current flow " << this->current_sending_flow->id << " finished sending data pkt\n";
        }
        else{
            this->current_sending_flow->scheduled = false;
            this->sending_flows.push(this->current_sending_flow);
            if(debug_host(this->id))
                std::cout << get_current_time() << " current flow " << this->current_sending_flow->id << " finished sending in this round\n";

        }

        this->current_sending_flow = NULL;

        if(next_sending_flow){
            if(debug_host(this->id))
                std::cout << get_current_time() << " exist next_sending_flow, setting cur_sending_flow to " << this->next_sending_flow->id << "\n";

            this->current_sending_flow = next_sending_flow;
            this->sender_busy_until = get_current_time() + 0.0000012 * this->current_sending_flow->remaining_schd_pkt;
            next_sending_flow = NULL;
        }
    }

    //if the queue is busy, reschedule send()
    if(this->queue->busy){
        if(this->host_proc_event == NULL){
            QueueProcessingEvent *qpe = this->queue->queue_proc_event;
            uint32_t queue_size = this->queue->bytes_in_queue;
            double td = this->queue->get_transmission_delay(queue_size);
            this->host_proc_event = new HostProcessingEvent(qpe->time + td + 0.000000000001, this);
            add_to_event_queue(this->host_proc_event);
        }
    }
    else
    {
        bool pkt_sent = false;
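        // Per pass, at most one kind of packet is sent, in priority order:
        // an RTS batch, then redundancy retransmissions, then normal data.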

        if(sender_schedule_state == 1 && get_current_time() >= sender_last_rts_send_time + RTS_TIMEOUT){
            if(debug_host(this->id)) std::cout << get_current_time() << " !set sender_schedule_state = 0 at host " << this->id <<  "\n";
            sender_schedule_state = 0;
        }

        //send rts
        if(this->sender_busy_until <= get_current_time() + RTS_ADVANCE && sender_schedule_state == 0){
            if(debug_host(this->id) )
                std::cout << get_current_time() << " calling sendRTS() at " << this->id << " sender_schedule_state:" << sender_schedule_state <<
                    " sender_last_rts_send_time:" << sender_last_rts_send_time << " sender_busy_until:" << sender_busy_until <<  "\n";
            this->send_RTS();
            if(this->sender_rts_sent_count > 0){
                pkt_sent = true;
                this->sender_last_rts_send_time = get_current_time();
                this->sender_schedule_state = 1;

                double td = this->queue->get_transmission_delay(this->queue->bytes_in_queue);
                if(this->host_proc_event == NULL){
                    this->host_proc_event = new HostProcessingEvent(get_current_time() + td + 0.000000000001, this);
                    add_to_event_queue(this->host_proc_event);
                }
            }
        }

        //send redundancy packets
        if(!pkt_sent && !this->sending_redundency.empty())
        {
            while(!this->sending_redundency.empty()){
                if(this->sending_redundency.top()->finished)
                    this->sending_redundency.pop();
                else{
                    //the earliest flow has timed out
                    if( this->sending_redundency.top()->ack_timeout <= get_current_time() ){
                        FountainFlowWithPipelineSchedulingHost* f = this->sending_redundency.top();
                        this->sending_redundency.pop();
                        f->send_pending_data();
                        f->ack_timeout = get_current_time() + 0.0000095;
                        this->sending_redundency.push(f);
                        pkt_sent = true;
                    // the earliest flow hasn't timed out yet
                    }else{
                        host_proc_evt_time = this->sending_redundency.top()->ack_timeout;
                    }

                    break; //should be here
                }
            }
        }

        //send normal data packet
        if (!pkt_sent && this->current_sending_flow){
            if(debug_flow(current_sending_flow->id) || debug_host(this->id))
                std::cout << get_current_time() << " send data pkt for flow id:" << current_sending_flow->id <<
                    " src:" << current_sending_flow->src->id << " dst:" << current_sending_flow->dst->id <<
                    " seq:" << current_sending_flow->next_seq_no << "\n";
            this->current_sending_flow->send_pending_data();
            pkt_sent = true;
        }
        else if(host_proc_evt_time < 10000 && this->host_proc_event == NULL){
            this->host_proc_event = new HostProcessingEvent(host_proc_evt_time + 0.000000000001, this);
            add_to_event_queue(this->host_proc_event);
        }
//        else if(!pkt_sent){
//            while(!this->sending_flows.empty()){
//                if(this->sending_flows.top()->finished)
//                    this->sending_flows.pop();
//                else{
//                    this->sending_flows.top()->send_pending_data();
//                    pkt_sent = true;
//                    break;
//                }
//            }
//        }

    }
}
Example #15
void FastpassFlow::send_schedule_pkt(FastpassEpochSchedule* schd) {
    FastpassSchedulePkt* pkt = new FastpassSchedulePkt(this, ((PFabricTopology*) topology)->arbiter, this->src, schd);
    add_to_event_queue(new PacketQueuingEvent(get_current_time(), pkt, ((PFabricTopology*) topology)->arbiter->queue));
}
void FastpassFlow::send_ack_pkt(uint32_t seq) {
    PlainAck* ack = new PlainAck(this, seq, params.hdr_size, this->dst, this->src);
    add_to_event_queue(new PacketQueuingEvent(get_current_time(), ack, this->dst->queue));
}
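// Report this flow's remaining demand to the arbiter; -1 marks the sender finished.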
void FastpassFlow::update_remaining_size() {
    FastpassRTS* rts = new FastpassRTS(this, this->src, ((PFabricTopology*) topology)->arbiter, this->sender_finished?-1:this->sender_remaining_num_pkts);
    add_to_event_queue(new PacketQueuingEvent(get_current_time(), rts, src->queue));
}
void FastpassFlow::schedule_send_pkt(double time) {
    add_to_event_queue(new FastpassFlowProcessingEvent(time, this));
}
Example #16
static void input_callback(void *context, IOReturn result, void *sender, IOHIDValueRef value)
{
	struct input_data *input = (struct input_data*)context;
	IOHIDElementRef elem = IOHIDValueGetElement(value);
	uint32_t page = IOHIDElementGetUsagePage(elem);
	uint32_t usage = IOHIDElementGetUsage(elem);
	uint32_t val = IOHIDValueGetIntegerValue(value);

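	/* Dispatch on the HID usage page: GenericDesktop carries mouse axes and the
	   wheel, Button carries mouse buttons, KeyboardOrKeypad carries key events */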
	if (page == kHIDPage_GenericDesktop)
	{
		if (input->ignore_mouse)
		{
			return;
		}

		switch (usage)
		{
			case kHIDUsage_GD_X:
				pthread_mutex_lock(&input->mouse_mutex);
				input->mouse_x += val;
				pthread_mutex_unlock(&input->mouse_mutex);
				break;
			case kHIDUsage_GD_Y:
				pthread_mutex_lock(&input->mouse_mutex);
				input->mouse_y += val;
				pthread_mutex_unlock(&input->mouse_mutex);
				break;
			case kHIDUsage_GD_Wheel:
				if ((int32_t)val > 0)
				{
					add_to_event_queue(input, K_MWHEELUP, true);
					add_to_event_queue(input, K_MWHEELUP, false);
				}
				else if ((int32_t)val < 0)
				{
					add_to_event_queue(input, K_MWHEELDOWN, true);
					add_to_event_queue(input, K_MWHEELDOWN, false);
				}
				break;
			default:
				break;
		}
	}
	else if (page == kHIDPage_Button)
	{
		if (input->ignore_mouse)
		{
			return;
		}

		if (usage < 1 || usage > 10)
		{
			usage = 10;
		}

		add_to_event_queue(input, K_MOUSE1 + usage - 1, val ? true : false);
	}
	else if (page == kHIDPage_KeyboardOrKeypad)
	{
		if (usage == kHIDUsage_KeyboardLeftGUI)
		{
			input->left_cmd_key_active = val ? true : false;
		}
		else if (usage == kHIDUsage_KeyboardRightGUI)
		{
			input->right_cmd_key_active = val ? true : false;
		}

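		/* While a Command key is held, map 'c'/'v' to synthetic copy/paste
		   events instead of normal key input */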
		if (usage < sizeof(keytable) && (input->left_cmd_key_active || input->right_cmd_key_active))
		{
			if (keytable[usage] == 'c' && val)
			{
				add_to_event_queue(input, K_COPY, true);
				add_to_event_queue(input, K_COPY, false);
			}
			else if (keytable[usage] == 'v' && val)
			{
				add_to_event_queue(input, K_PASTE, true);
				add_to_event_queue(input, K_PASTE, false);
			}

			return;
		}

		if (usage < sizeof(keytable))
		{
			add_to_event_queue(input, keytable[usage], val ? true : false);

			pthread_mutex_lock(&input->key_mutex);

			if (val)
			{
				input->repeatkey = keytable[usage];
				input->nextrepeattime = Sys_IntTime() + input->key_repeat_initial_delay;
			}
			else
			{
				input->repeatkey = 0;
				input->nextrepeattime = 0;
			}

			pthread_mutex_unlock(&input->key_mutex);
		}
	}
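	/* Vendor-defined page: presumably where this keyboard reports the Fn key */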
	else if (page == 0xFF)
	{
		if (usage == kHIDUsage_KeyboardErrorUndefined)
		{
			input->fn_key_active = val ? true : false;
		}
	}
}