Example no. 1
// Thread which will handle dequeuing and re-enqueuing based on the status
// and the flags for all ports in the output buffer
void *output_monitor_routine(void *arg) {
    packet_t in_pkt ;
    ip_address_t address ;
    int dest_port=0 ;
    int loop_count= 0 ;
    element_to_queue * element ;

    while(!die) {

        // Only bother dequeuing if there are elements.
        if((output_buffer->size > 0)) {
            // This will indeed dequeue the packet, but we may
            // have to put it back if the port isn't ready.

            queue_lock(output_buffer) ;
            dequeue(output_buffer,&in_pkt) ;
            queue_unlock(output_buffer) ;

            // Fetch the IP & lookup destination port
            ip_address_copy(&(in_pkt.address),&address);
            dest_port = cam_lookup_address(&address) ;
            if((dest_port != -1) && (dest_port < 4)) {
                // Wait for the lock
                port_lock(&(out_port[dest_port])) ;
                // If the flag is busy from the last write, then
                // we have to put the packet back in the queue and just
                // have to wait until we get to it again.
                if(out_port[dest_port].flag) {
                    element = calloc(1,sizeof(element_to_queue)) ;
                    packet_copy(&in_pkt,&(element->packet));

                    queue_lock(output_buffer) ;
                    enqueue(element,output_buffer) ;
                    queue_unlock(output_buffer) ;

                    port_unlock(&(out_port[dest_port])) ;
                    continue ;
                }
                // Port ready to be written , so go ahead and write.
                packet_copy(&in_pkt,&(out_port[dest_port].packet));
                out_port[dest_port].flag = TRUE ;
                port_unlock(&(out_port[dest_port])) ;
            }
        }
        // Make sure it has tried to dequeue at least LOOP_COUNT elements
        // before putting the thread to sleep.
        if(loop_count > LOOP_COUNT) {
            loop_count = 0 ;
            sleep() ;
        } else
            loop_count++ ;
    }
}
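
The unlocked size test at the top of the loop is only safe while this thread is the sole consumer of output_buffer. With more than one consumer, the element seen at the test can be gone by the time the lock is taken, so a safer shape re-checks under the lock. A minimal sketch against the same queue API:

    int got_packet = 0;
    queue_lock(output_buffer) ;
    if (output_buffer->size > 0) {
        dequeue(output_buffer,&in_pkt) ;
        got_packet = 1;
    }
    queue_unlock(output_buffer) ;
    if (!got_packet)
        continue ;  // nothing to do this iteration
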
Example no. 2
void *switch_thread_routine(void *arg)
{
	int i, k;	// For loops
	while (!die) {

		// Check if the in ports have any pending packets
		for (i = 0; i < 4; i++) {
			port_lock(&(in_port[i]));

			if (in_port[i].flag) {
				// If flag is set, deal with the packet
				int target_port = cam_lookup_address(&in_port[i].packet.address);
				// Guard the lookup result before indexing out_port[]
				// (cam_lookup_address returns -1 on a failed lookup).
				if (target_port < 0 || target_port >= 4) {
					in_port[i].flag = 0;	// no valid destination: drop the packet
					port_unlock(&(in_port[i]));
					continue;
				}
				port_lock(&(out_port[target_port]));

				// If the target out port is free, send the packet to it.
				// Else, add the current packet to the port's buffer.
				if (!out_port[target_port].flag) {
					out_port[target_port].packet.address = in_port[i].packet.address;
					out_port[target_port].packet.payload = in_port[i].packet.payload;
					out_port[target_port].flag = 1;
				} else {
					// If the buffer isn't full, add the packet; it is
					// lost if the buffer is full. pending[] holds the
					// index of the top element, so the slot written at
					// ++pending must stay below BUFFER_SIZE.
					if (pending[target_port] < BUFFER_SIZE - 1) {
						buffers[target_port][++pending[target_port]].address = in_port[i].packet.address;
						buffers[target_port][pending[target_port]].payload = in_port[i].packet.payload;
					}
				}

				in_port[i].flag = 0;
				port_unlock(&(out_port[target_port]));
			}
			port_unlock(&(in_port[i]));
		}
		
		// Check if the out ports have any pending packets in their buffers
		for (k = 0; k < 4; k++) {
			port_lock(&(out_port[k]));

			// If the output port is free and has packets in its buffer, send packets to the port.
			if (!out_port[k].flag && pending[k] >= 0) {
				out_port[k].packet.address = buffers[k][pending[k]].address;
				out_port[k].packet.payload = buffers[k][pending[k]].payload;
				out_port[k].flag = 1;
				pending[k]--;
			}

			port_unlock(&(out_port[k]));
		}

	}
}
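
One subtlety above: pending[] is used as a stack top index (push at ++pending, pop at pending--), so buffered packets leave the port in reverse arrival order. Example no. 15 below preserves order with separate read/write ring indices; a minimal ring-buffer push sketch, using hypothetical head/tail arrays:

    // Hypothetical per-port ring indices; one slot is sacrificed so a
    // full ring can be told apart from an empty one.
    int head[4] = {0}, tail[4] = {0};

    int buffer_push(int port, const packet_t *p) {
        int next = (tail[port] + 1) % BUFFER_SIZE;
        if (next == head[port]) return -1;   // full: caller drops the packet
        buffers[port][tail[port]] = *p;
        tail[port] = next;
        return 0;
    }
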
Example no. 3
static void done_recv_request(void * args) {
    AsyncReqInfo * req = (AsyncReqInfo *) args;
    PortConnection * conn = (PortConnection *) req->client_data;

    port_unlock(conn);
    if (conn->connected == 0) {
        port_connection_close(conn);
        return;
    }
    if (req->u.sio.rval == 0
            || (req->u.sio.rval == -1 && req->error != EINTR)) {
        /* Check if we are in auto connect mode and server has not been
         * shut down
         */
        if (conn->server->auto_connect && conn->server->sock != -1) {
            /* Client has disconnected; don't close the connection if we
             * are in auto connect mode but simply unbind the client from
             * the port.
             */
            port_connection_unbind(conn);
        }
        else port_connection_close(conn);
        return;
    }
    port_lock(conn);
    conn->server->client_addr_len = req->u.sio.addrlen;
    send_packet(conn, req->u.sio.bufp, req->u.sio.rval);
}
Example no. 4
static void connect_port_callback(PortConnection * conn, int error) {
    assert(is_dispatch_thread());
    port_unlock(conn);
    if (conn->shutdown_in_progress) return;
    if (error != 0) {
        port_connection_close(conn);
        return;
    }
    else {
        int idx;
        if (conn->server->connect_callback) conn->server->connect_callback(conn->server, conn->server->callback_data);
        conn->connected = 1;
        if (conn->fd != -1) {
            port_lock(conn);
            conn->recv_req.u.sio.sock = conn->fd;
            conn->recv_req.u.sio.addr = &conn->server->client_addr;
            conn->recv_req.u.sio.addrlen = sizeof(conn->server->client_addr);
            async_req_post(&conn->recv_req);
        }
        /* Send multiple TCF streams read requests in parallel; this is
         * to limit the performance impact on network with high latency.
         */
        for (idx = 0; idx < MAX_STREAM_READ; idx++) 
            read_packet(conn, idx);
    }
}
Example no. 5
static void delete_config_done(Channel *c, void *client_data, int error) {
    PortConnection * conn = (PortConnection *) client_data;
    Trap trap;

    if (set_trap(&trap)) {
        if (!error) {
            error = read_errno(&c->inp);
            json_test_char(&c->inp, MARKER_EOM);
        }
        clear_trap(&trap);
    }
    else {
        error = trap.error;
    }
    if (!conn->auto_connect_stream) {
        protocol_send_command(conn->server->channel, "Streams", "disconnect", disconnect_stream_done, conn);
        json_write_string(&conn->server->channel->out, conn->out_stream_id);
        write_stream(&conn->server->channel->out, MARKER_EOA);
        write_stream(&conn->server->channel->out, MARKER_EOM);
    }
    else {
        loc_free(conn->out_stream_id);
        conn->out_stream_id = NULL;
        loc_free(conn->in_stream_id);
        conn->in_stream_id = NULL;
        port_unlock(conn);
        port_connection_close(conn);
    }
}
Example no. 6
void chSysUnlock(void) {

  chDbgAssert(currp->p_locks > 0,
              "chSysUnlock(), #1",
              "non-positive nesting counter");
  if (--currp->p_locks == 0)
    port_unlock();
}
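
The counterpart takes port_lock() only on the outermost nesting level, which keeps nested critical sections cheap. A sketch of the matching lock side, assuming the same p_locks counter (written from the pattern above, not copied from the ChibiOS source):

void chSysLock(void) {

  chDbgAssert(currp->p_locks >= 0,
              "chSysLock(), #1",
              "negative nesting counter");
  if (currp->p_locks++ == 0)
    port_lock();
}
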
Example no. 7
static void done_send_request(void * args) {
    AsyncReqInfo * req = (AsyncReqInfo *) args;
    PortConnection * conn = (PortConnection *) req->client_data;
    int idx = conn->send_in_progress;

    port_unlock(conn);
    conn->send_in_progress = -1;
    if (conn->connected == 0) {
        port_connection_close(conn);
        return;
    }
    if (req->u.sio.rval == 0
           || (req->u.sio.rval == -1 && req->error != EINTR)) {
        /* Check if we are in auto connect mode and server has not been
         * shutdown
         */
        if (conn->server->auto_connect && conn->server->sock != -1) {
            /* Client has disconnected; don't close the connection if we
             * are in auto connect mode but simply unbind the client from
             * the port. */
            port_connection_unbind(conn);

            /* Still read packets from the target even if no client is
             * connected. This may have to be revisited. */
            read_packet(conn, idx);
        }
        else port_connection_close(conn);
        return;
    }

    if (conn->pending_send_req != 0) {
        int next_idx;
        int loop;

        /* Get the next packet to send. In general, it is the next buffer
         * but there are some error cases (connection lost, empty packet
         * received from TCF agent) which may break this rule. */
        for (loop = 0; loop < MAX_STREAM_READ; loop++) {
            next_idx = (idx + loop) % MAX_STREAM_READ;
            if (conn->pending_send_req & (1 << next_idx))
                break;
        }
        assert (loop != MAX_STREAM_READ &&
                        (conn->pending_send_req & (1 << next_idx)));


        conn->send_in_progress = next_idx;
        conn->pending_send_req &= ~(1 << next_idx);
        conn->send_req.u.sio.bufp = conn->read_buffer[next_idx];
        conn->send_req.u.sio.bufsz = conn->read_buffer_size[next_idx];
        port_lock(conn);
        conn->send_req.u.sio.sock = conn->fd;
        conn->send_req.u.sio.addr = &conn->server->client_addr;
        conn->send_req.u.sio.addrlen = conn->server->client_addr_len;
        async_req_post(&conn->send_req);
    }
    read_packet(conn, idx);
}
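
pending_send_req above is a plain bitmask: bit idx set means read buffer idx is waiting for its send turn. The wrap-around scan can be isolated into a helper; a sketch (hypothetical, not in the source), assuming MAX_STREAM_READ fits in an unsigned int mask:

    /* Return the next pending buffer index at or after idx (wrapping
     * around), or -1 if no bit is set. */
    static int next_pending(unsigned int mask, int idx) {
        int loop;
        for (loop = 0; loop < MAX_STREAM_READ; loop++) {
            int next = (idx + loop) % MAX_STREAM_READ;
            if (mask & (1u << next)) return next;
        }
        return -1;
    }
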
Example no. 8
void *switch_thread_routine(void *arg)
{
    packet_t in_pkt ;
    int i= 0 ;
    element_to_queue * element ;

    while(!die) {
        // Lock all ports first: if this thread were preempted and the
        // input port writer ran in between, it might overwrite packets
        // that have not been copied yet.
        for(i=0 ; i < NUM_PORTS ; i++) {
            port_lock(&(in_port[i])) ;
        }

        // Now check each port for a pending packet. A port is unlocked
        // only after its packet has been copied into the queue (or when
        // it has nothing pending), so no packet can be overwritten.
        for(i=0 ; i < NUM_PORTS ; i++) {
            if(in_port[i].flag == TRUE) {
                packet_copy(&(in_port[i].packet),&in_pkt);
                in_port[i].flag = FALSE ;
            } else {
                port_unlock(&(in_port[i])) ;
                continue ;
            }
            element = calloc(1,sizeof(element_to_queue)) ;
            packet_copy(&in_pkt,&(element->packet));

            queue_lock(output_buffer) ;
            enqueue(element,output_buffer) ;
            queue_unlock(output_buffer) ;

            port_unlock(&(in_port[i])) ;
        }

        // Pause for a little while and then check again.
        sleep() ;
    }
    printf("All packets sent.. Exiting !\n") ;
}
Example no. 9
static void send_packet_callback(PortConnection * conn, int error) {
    assert(is_dispatch_thread());
    port_unlock(conn);
    if (error != 0) {
        port_connection_close(conn);
    }
    else {
        port_lock(conn);
        conn->recv_req.u.sio.sock = conn->fd;
        conn->recv_req.u.sio.addrlen = sizeof(conn->server->client_addr);
        async_req_post(&conn->recv_req);
    }
}
Example no. 10
void unlockAnyContext(void) {
#if USE_PORT_LOCK
	port_unlock();
#else /* #if USE_PORT_LOCK */
	if (isIsrContext()) {
		chSysUnlockFromISR();
	} else {
		chSysUnlock();
	}
#endif /* #if USE_PORT_LOCK */
}
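
A matching lock-side helper would pick its primitive the same way; a sketch under the same USE_PORT_LOCK convention (hypothetical, mirroring the unlock side above):

void lockAnyContext(void) {
#if USE_PORT_LOCK
	port_lock();
#else /* #if USE_PORT_LOCK */
	if (isIsrContext()) {
		chSysLockFromISR();
	} else {
		chSysLock();
	}
#endif /* #if USE_PORT_LOCK */
}
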
Example no. 11
static void read_packet_callback(PortConnection * conn, int error, int idx,
        size_t size) {

    assert(is_dispatch_thread());
    port_unlock(conn);
    if (error != 0 || size == 0) {
        port_connection_close(conn);
    }
    else {
        conn->read_buffer_size[idx] = size;

        /* Call read hooks if any. Note that those hooks can modify the content of the
         * packets (remove characters).
         */
        if (conn->server->recv_callback) {
            conn->server->recv_callback(conn->server, conn->read_buffer[idx], &conn->read_buffer_size[idx], IN_BUF_SIZE, conn->server->callback_data);
        }

        /* If no client is connected or if the filter has removed all the packet content, 
         * do not post a send request.
         */

        if (conn->fd != -1 && conn->read_buffer_size[idx] != 0) {
            /* If there is already a send progress in request; postpone the
             * current one until it is completed.
             */
            if (conn->send_in_progress != -1) {
               conn->pending_send_req |= 1 << idx;
               return;
            }
            port_lock(conn);
            conn->send_in_progress = idx;
            assert (conn->pending_send_req == 0);
            conn->send_req.u.sio.bufp = conn->read_buffer[idx];
            conn->send_req.u.sio.bufsz = conn->read_buffer_size[idx];
            conn->send_req.u.sio.sock = conn->fd;
            conn->send_req.u.sio.addr = &conn->server->client_addr;
            conn->send_req.u.sio.addrlen = conn->server->client_addr_len;
            async_req_post(&conn->send_req);
        }
        else {
            read_packet(conn, idx);
        }
    }
}
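
The recv_callback hook receives the buffer, a pointer to its size, and the buffer capacity, so a filter can rewrite the packet in place and shrink it. A hypothetical hook that strips carriage returns; the signature is inferred from the call site above, and the PortServer name and exact parameter types are assumptions:

    static void strip_cr_hook(PortServer * server, char * buf, size_t * size,
                              size_t bufsz, void * callback_data) {
        size_t in, out = 0;
        for (in = 0; in < *size; in++) {
            if (buf[in] != '\r') buf[out++] = buf[in];
        }
        *size = out;  /* shrinking to 0 suppresses the send request */
    }
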
Example no. 12
static void delete_config_done(Channel *c, void *client_data, int error) {
    PortConnection * conn = (PortConnection *) client_data;
    Trap trap;

    if (set_trap(&trap)) {
        if (!error) {
            error = read_errno(&c->inp);
            json_test_char(&c->inp, MARKER_EOM);
        }
        clear_trap(&trap);
    }
    else {
        error = trap.error;
    }
    loc_free(conn->out_stream_id);
    conn->out_stream_id = NULL;
    loc_free(conn->in_stream_id);
    conn->in_stream_id = NULL;
    port_unlock(conn);
    port_connection_close(conn);
}
Example no. 13
/**
 * @brief   Writes to a dedicated packet buffer.
 *
 * @param[in] udp       pointer to a @p stm32_usb_descriptor_t
 * @param[in] oqp       output queue from which the packet data is fetched
 * @param[in] n         maximum number of bytes to copy. This value must
 *                      not exceed the maximum packet size for this endpoint.
 *
 * @notapi
 */
static void usb_packet_write_from_queue(stm32_usb_descriptor_t *udp,
                                        OutputQueue *oqp, size_t n) {
  size_t nhw;
  uint32_t *pmap = USB_ADDR2PTR(udp->TXADDR0);

  udp->TXCOUNT0 = (uint16_t)n;
  nhw = n / 2;
  while (nhw > 0) {
    uint32_t w;

    w  = (uint32_t)*oqp->q_rdptr++;
    if (oqp->q_rdptr >= oqp->q_top)
      oqp->q_rdptr = oqp->q_buffer;
    w |= (uint32_t)*oqp->q_rdptr++ << 8;
    if (oqp->q_rdptr >= oqp->q_top)
      oqp->q_rdptr = oqp->q_buffer;
    *pmap++ = w;
    nhw--;
  }

  /* Last byte for odd numbers.*/
  if ((n & 1) != 0) {
    *pmap = (uint32_t)*oqp->q_rdptr++;
    if (oqp->q_rdptr >= oqp->q_top)
      oqp->q_rdptr = oqp->q_buffer;
  }

  /* Updating queue. Note, the lock is done in this unusual way because this
     function can be called from both ISR and thread context so the kind
     of lock function to be invoked cannot be decided beforehand.*/
  port_lock();
  dbg_enter_lock();

  oqp->q_counter += n;
  while (notempty(&oqp->q_waiting))
    chSchReadyI(fifo_remove(&oqp->q_waiting))->p_u.rdymsg = Q_OK;

  dbg_leave_lock();
  port_unlock();
}
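
Each loop iteration packs two queue bytes into the low halfword of a 32-bit word, matching the STM32 packet memory layout of 16-bit cells accessed on 32-bit boundaries. The packing is easier to see against a flat byte buffer; a hypothetical helper, not part of the driver:

static void pma_write_bytes(uint32_t *pmap, const uint8_t *buf, size_t n) {
  size_t nhw = n / 2;

  while (nhw-- > 0) {
    uint32_t w;

    w  = (uint32_t)*buf++;          /* first byte to bits 7..0   */
    w |= (uint32_t)*buf++ << 8;     /* second byte to bits 15..8 */
    *pmap++ = w;
  }
  if ((n & 1) != 0)                 /* odd trailing byte */
    *pmap = (uint32_t)*buf;
}
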
Example no. 14
int main(int argc, char** argv) {
  int i, ret;

  struct sockaddr_in cliaddr;  // used by accept()
  int clilen;

  int conn_fd;  // fd for a new connection with client
  int file_fd;  // fd for file that we open for reading
  char buf[512];
  int buf_len;

  // Parse args.
  if (argc != 2) {
    fprintf(stderr, "usage: %s [port]\n", argv[0]);
    exit(1);
  }

  // Initialize server
  init_server((unsigned short) atoi(argv[1]));

  // Get file descriptor table size and initialize request table
  maxfd = getdtablesize();
  requestP = (request*) malloc(sizeof(request) * maxfd);
  if (requestP == NULL) {
    ERR_EXIT("out of memory allocating all requests");
  }
  for (i = 0; i < maxfd; i++) {
    init_request(&requestP[i]);
  }
  requestP[svr.listen_fd].conn_fd = svr.listen_fd;
  strcpy(requestP[svr.listen_fd].host, svr.hostname);

  // Loop for handling connections
  fprintf(stderr, "\nstarting on %.80s, port %d, fd %d, maxconn %d...\n", svr.hostname, svr.port, svr.listen_fd, maxfd);

  // my own variable
  fd_set rset, allset;
  FD_ZERO(&allset);
  FD_SET(svr.listen_fd, &allset);
  int monitor_fd = svr.listen_fd;
  int socket_fd;
  Porter* porter = port_init();
  int id, amount, price;

  while (1) {
    // IO multiplexing: wait until at least one descriptor is ready
    rset = allset;
    if (select(monitor_fd + 1, &rset, NULL, NULL, NULL) < 0) {
      if (errno == EINTR) continue;  // interrupted by a signal; retry
      ERR_EXIT("select");
    }

    // new client connection
    if (FD_ISSET(svr.listen_fd, &rset)) {
      clilen = sizeof(cliaddr);
      conn_fd = accept(svr.listen_fd, (struct sockaddr*)&cliaddr, (socklen_t*)&clilen);
      if (conn_fd < 0) {
        if (errno == EINTR || errno == EAGAIN) continue;  // try again
        if (errno == ENFILE) {
          (void) fprintf(stderr, "out of file descriptor table ... (maxconn %d)\n", maxfd);
          continue;
        }
        ERR_EXIT("accept");
      }
      requestP[conn_fd].conn_fd = conn_fd;
      strcpy(requestP[conn_fd].host, inet_ntoa(cliaddr.sin_addr));
      fprintf(stderr, "getting a new request... fd %d from %s\n", conn_fd, requestP[conn_fd].host);

      // add new descriptor to set
      FD_SET(conn_fd, &allset);

      // extend the select() range
      if (conn_fd > monitor_fd) {
        monitor_fd = conn_fd;
      }
    }

    // check all client if data is ready
    for (i = svr.listen_fd+1; i <= monitor_fd; ++i) {
      socket_fd = requestP[i].conn_fd;
      if (FD_ISSET(socket_fd, &rset)) {
        ret = handle_read(&requestP[i]); // parse data from client to requestP[conn_fd].buf
        if (ret < 0) {
          fprintf(stderr, "bad request from %s\n", requestP[i].host);
          continue;
        }

#ifdef READ_SERVER
        id = atoi(requestP[i].buf);
        // sprintf(buf, "%s : %s\n",accept_read_header,requestP[i].buf);
        if (id <= 0 || id > 20) {
          sprintf(buf, "Operation failed.\n");
        }
        else if (port_read(porter, id, &amount, &price) == -1) {
          sprintf(buf, "This item is locked.\n");
        }
        else {
          sprintf(buf, "item%d $%d remain: %d\n", id, price, amount);
        }
        write(requestP[i].conn_fd, buf, strlen(buf));
#else
        if (requestP[i].wait_for_write != 1) {
          // wait_for_write == 1 means this socket has already locked an
          // item and is waiting for a modification command
          id = atoi(requestP[i].buf);
          if (id <= 0 || id > 20) {
            sprintf(buf, "Operation failed.\n");
            write(requestP[i].conn_fd, buf, strlen(buf));
          }
          else if (port_write(porter, id) != 1) {
            sprintf(buf, "This item is locked.\n");
            write(requestP[i].conn_fd, buf, strlen(buf));
          }
          else {
            // The item is not locked by another process
            int j;
            for (j = svr.listen_fd+1; j <= monitor_fd; ++j) {
              if (j != i && requestP[j].wait_for_write == 1 && requestP[j].item == id) {
                sprintf(buf, "This item is locked.\n");
                write(requestP[i].conn_fd, buf, strlen(buf));
                break;
              }
            }
            if (j > monitor_fd) {
              // The item is not locked by any other file descriptor
              sprintf(buf, "This item is modifiable.\n");
              write(requestP[i].conn_fd, buf, strlen(buf));
              requestP[i].wait_for_write = 1;
              requestP[i].item = id;
              continue;
            }
          }
        }
        else {
          // This socket is waiting for command such as buy, sell, price
          if (port_operate(porter, requestP[i].buf, buf, requestP[i].item) == 0)
            write(requestP[i].conn_fd, buf, strlen(buf));
          port_unlock(porter, requestP[i].item);
        }
#endif

        close(requestP[i].conn_fd);
        // clear socket descriptor from the set
        FD_CLR(socket_fd, &allset);
        free_request(&requestP[i]);
      }
    }
  }

  free(requestP);
  port_close(porter);
  return 0;
}
Example no. 15
void *switch_thread_routine(void *arg)
{
	int inputPortNumber, outputPortNumber, i;	// For loops

	int inputPortPriorities[NUM_PORTS];
	int outputPortPriorities[NUM_PORTS];

	// ex. virtualOutputQueues[inputPort][outputPort]
	packet_t virtualOutputQueues[NUM_PORTS][NUM_PORTS][VOQ_SIZE];
	int virtualOutputQueueReadIndex[NUM_PORTS][NUM_PORTS];
	int virtualOutputQueueWriteIndex[NUM_PORTS][NUM_PORTS];

	packet_t outputBuffer[NUM_PORTS][OUTPUT_BUFFER_SIZE];
	int outputBufferReadIndex[NUM_PORTS];
	int outputBufferWriteIndex[NUM_PORTS];

	// Initialize the buffers
	memset(inputPortPriorities, 0, NUM_PORTS * sizeof(int));
	memset(outputPortPriorities, 0, NUM_PORTS * sizeof(int));

	memset(virtualOutputQueues, 0, NUM_PORTS * NUM_PORTS * VOQ_SIZE * sizeof(packet_t));
	memset(virtualOutputQueueReadIndex, 0, NUM_PORTS * NUM_PORTS * sizeof(int));
	memset(virtualOutputQueueWriteIndex, 0, NUM_PORTS * NUM_PORTS * sizeof(int));

	memset(outputBuffer, 0, NUM_PORTS * OUTPUT_BUFFER_SIZE * sizeof(packet_t));
	memset(outputBufferReadIndex, 0, NUM_PORTS * sizeof(int));
	memset(outputBufferWriteIndex, 0, NUM_PORTS * sizeof(int));

	while (!die) {

		// Check if the in ports have any pending packets
		// Add them to the appropriate VOQ
		for (inputPortNumber = 0; inputPortNumber < NUM_PORTS; inputPortNumber++) {
			port_lock(&(in_port[inputPortNumber]));
			if (in_port[inputPortNumber].flag) {
				// If flag is set, deal with the packet
				outputPortNumber = cam_lookup_address(&in_port[inputPortNumber].packet.address);
				// Drop packets whose lookup fails (cam_lookup_address
				// returns -1 in that case) rather than indexing with -1.
				if (outputPortNumber < 0 || outputPortNumber >= NUM_PORTS) {
					in_port[inputPortNumber].flag = 0;
					port_unlock(&(in_port[inputPortNumber]));
					continue;
				}
				int queueWriteIndex = virtualOutputQueueWriteIndex[inputPortNumber][outputPortNumber];
				virtualOutputQueues[inputPortNumber][outputPortNumber][queueWriteIndex].address = in_port[inputPortNumber].packet.address;
				virtualOutputQueues[inputPortNumber][outputPortNumber][queueWriteIndex].payload = in_port[inputPortNumber].packet.payload;

				// Increment the write pointer for the virtual output queue
				virtualOutputQueueWriteIndex[inputPortNumber][outputPortNumber] = (queueWriteIndex + 1) % VOQ_SIZE;

				// Clear the flag, we've got the packet in a virtual output queue
				in_port[inputPortNumber].flag = 0;
			}
			port_unlock(&(in_port[inputPortNumber]));
		}

		// Run a round of the RR-New scheduling algorithm if any of the queues are non-empty
		// Request phase
		int totalRequests = 0;
		// ex. requests[outputPortNumber][inputPortNumber] = TRUE
		int requests[NUM_PORTS][NUM_PORTS];

		
		for (inputPortNumber = 0; inputPortNumber < NUM_PORTS; inputPortNumber++) {
			for (outputPortNumber = 0; outputPortNumber < NUM_PORTS; outputPortNumber++) {

				int queueReadIndex = virtualOutputQueueReadIndex[inputPortNumber][outputPortNumber];
				int queueWriteIndex = virtualOutputQueueWriteIndex[inputPortNumber][outputPortNumber];

				if (queueReadIndex != queueWriteIndex) {
					// Indicates the presence of a packet
					totalRequests++;
					requests[outputPortNumber][inputPortNumber] = TRUE;
				} else {
					// No request to see here...
					requests[outputPortNumber][inputPortNumber] = FALSE;
				}
			}
		}

		// We only need to go through the grant/accept phase if there were requests
		if (totalRequests > 0) {
			cellTimeCounter++;

			// Grant phase
			// ex. grantedRequests[inputPortNumber][outputPortNumber] = TRUE
			int grantedRequests[NUM_PORTS][NUM_PORTS];

			for (outputPortNumber = 0; outputPortNumber < NUM_PORTS; outputPortNumber++) {
				// Initialize granted to FALSE
				for (inputPortNumber = 0; inputPortNumber < NUM_PORTS; inputPortNumber++) {
					grantedRequests[inputPortNumber][outputPortNumber] = FALSE;
				}

				// Start at the priority pointer, look for the first request to grant
				inputPortNumber = outputPortPriorities[outputPortNumber];
				for (i = 0; i < NUM_PORTS; i++) {
					if (requests[outputPortNumber][inputPortNumber]) {
						grantedRequests[inputPortNumber][outputPortNumber] = TRUE;
						break;
					}
					inputPortNumber = (inputPortNumber + 1) % NUM_PORTS;
				}
			}

			// Accept phase
			for (inputPortNumber = 0; inputPortNumber < NUM_PORTS; inputPortNumber++) {
				// Start at the priority pointer, look for the first grant to accept
				outputPortNumber = inputPortPriorities[inputPortNumber];
				for (i = 0; i < NUM_PORTS; i++) {
					if (grantedRequests[inputPortNumber][outputPortNumber]) {
						// Accept! Send the packet over
						int queueReadIndex = virtualOutputQueueReadIndex[inputPortNumber][outputPortNumber];
						packet_t packet = virtualOutputQueues[inputPortNumber][outputPortNumber][queueReadIndex];
						virtualOutputQueueReadIndex[inputPortNumber][outputPortNumber] = (queueReadIndex + 1) % VOQ_SIZE;

						port_lock(&(out_port[outputPortNumber]));

						// If the target out port is free, send the packet to it.
						// Else, add the current packet to the port's buffer.
						if (!out_port[outputPortNumber].flag) {
							out_port[outputPortNumber].packet.address = packet.address;
							out_port[outputPortNumber].packet.payload = packet.payload;
							out_port[outputPortNumber].flag = 1;
						} else {
							// Add the packet to the output buffer to be read later
							int writeIndex = outputBufferWriteIndex[outputPortNumber];
							outputBuffer[outputPortNumber][writeIndex].address = packet.address;
							outputBuffer[outputPortNumber][writeIndex].payload = packet.payload;

							// Bump the write index
							outputBufferWriteIndex[outputPortNumber] = (writeIndex + 1) % OUTPUT_BUFFER_SIZE;
						}

						port_unlock(&(out_port[outputPortNumber]));

						// Update the priority trackers
						inputPortPriorities[inputPortNumber] = (outputPortNumber + 1) % NUM_PORTS;
						outputPortPriorities[outputPortNumber] = (inputPortNumber + 1) % NUM_PORTS;

						// We're done here...
						break;
					}

					// Haven't accepted anything yet, keep on tryin'
					outputPortNumber = (outputPortNumber + 1) % NUM_PORTS;
				}
			}
		}
		
		// Check if the out ports have any pending packets in their buffers
		for (outputPortNumber = 0; outputPortNumber < NUM_PORTS; outputPortNumber++) {
			port_lock(&(out_port[outputPortNumber]));

			// If the output port is free and has packets in its buffer, send them
			int readIndex = outputBufferReadIndex[outputPortNumber];
			int writeIndex = outputBufferWriteIndex[outputPortNumber];

			if (!out_port[outputPortNumber].flag && readIndex != writeIndex) {
				packet_t packet = outputBuffer[outputPortNumber][readIndex];
				out_port[outputPortNumber].packet.address = packet.address;
				out_port[outputPortNumber].packet.payload = packet.payload;
				out_port[outputPortNumber].flag = 1;

				outputBufferReadIndex[outputPortNumber] = (readIndex + 1) % OUTPUT_BUFFER_SIZE;
			}

			port_unlock(&(out_port[outputPortNumber]));
		}
	}
}
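
The request/grant/accept passes above amount to one iteration of an iSLIP-style round-robin matcher: every output grants the first requesting input at or after its priority pointer, every input accepts the first grant at or after its own pointer, and both pointers advance past a matched partner so no port starves. The grant step in isolation (a sketch, reusing the requests[output][input] layout from above):

    // For one output port, scan inputs starting at its round-robin
    // pointer and return the first one with a request, or -1 if none.
    int grant_for_output(int out, int requests[NUM_PORTS][NUM_PORTS],
                         int outputPortPriorities[NUM_PORTS]) {
        int i, in = outputPortPriorities[out];
        for (i = 0; i < NUM_PORTS; i++) {
            if (requests[out][in])
                return in;
            in = (in + 1) % NUM_PORTS;
        }
        return -1;
    }
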