/****f* spin1_api.c/spin1_flush_tx_packet_queue
*
* SUMMARY
*  This function flushes the outbound packet queue by adjusting the
*  queue pointers to make it appear empty to the consumer.
*
* SYNOPSIS
*  void spin1_flush_tx_packet_queue()
*
* SOURCE
*/
void spin1_flush_tx_packet_queue()
{
  uint cpsr = spin1_irq_disable ();

  tx_packet_queue.start = tx_packet_queue.end;

  spin1_mode_restore(cpsr);
}
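/* Usage sketch (illustrative, not part of the API): an application that
 * wants to abandon unsent traffic, e.g. before starting a new simulation
 * phase, can flush first. The start_new_phase() helper is hypothetical. */
void start_new_phase(void)
{
  // make any unsent packets invisible to the cc_tx_empty consumer
  spin1_flush_tx_packet_queue();

  // ... enqueue fresh traffic with spin1_send_mc_packet() ...
}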
/****f* spin1_api.c/deschedule
*
* SUMMARY
*  This function deschedules all callbacks corresponding to the given event
*  ID. One use for this function is to effectively discard all received
*  packets which are yet to be processed by calling
*  deschedule(MC_PACKET_RECEIVED). Note that this function cannot guarantee that
*  all callbacks pertaining to the given event ID will be descheduled: once a
*  callback has been prepared for execution by the dispatcher it is immune to
*  descheduling and will be executed upon return to the dispatcher.
*
* SYNOPSIS
*  void deschedule(uint event_id)
*
* INPUTS
*  uint event_id: event ID of the callbacks to be descheduled
*
* SOURCE
*/
void deschedule(uint event_id)
{
  uint cpsr = spin1_irq_disable ();

  task_queue_t *tq = &task_queue[callback[event_id].priority-1];

  // NULL out any queued task that points at this event's callback
  for (uint i = 0; i < TASK_QUEUE_SIZE; i++)
  {
    if (tq->queue[i].cback == callback[event_id].cback)
      tq->queue[i].cback = NULL;
  }

  spin1_mode_restore(cpsr);
}
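/* Usage sketch: the discard pattern mentioned in the SUMMARY above.
 * MC_PACKET_RECEIVED is the event ID from spin1_api.h; the reset_state()
 * wrapper is a hypothetical application helper, not part of the API. */
void reset_state(void)
{
  // drop queued MC-packet callbacks; a callback already claimed by the
  // dispatcher will still run once before control returns
  deschedule(MC_PACKET_RECEIVED);
}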
void synapses_do_timestep_update(timer_t time) {

    _print_ring_buffers(time);

    // Disable interrupts to stop DMAs interfering with the ring buffers
    uint32_t state = spin1_irq_disable();

    // Transfer the input from the ring buffers into the input buffers
    for (uint32_t neuron_index = 0; neuron_index < n_neurons;
            neuron_index++) {

        // Shape the existing input according to the included rule
        synapse_types_shape_input(input_buffers, neuron_index,
                neuron_synapse_shaping_params);

        // Loop through all synapse types
        for (uint32_t synapse_type_index = 0;
                synapse_type_index < SYNAPSE_TYPE_COUNT; synapse_type_index++) {

            // Get index in the ring buffers for the current timeslot for
            // this synapse type and neuron
            uint32_t ring_buffer_index = synapses_get_ring_buffer_index(
                time, synapse_type_index, neuron_index);

            // Convert ring-buffer entry to input and add on to correct
            // input for this synapse type and neuron
            synapse_types_add_neuron_input(input_buffers, synapse_type_index,
                    neuron_index, neuron_synapse_shaping_params,
                    synapses_convert_weight_to_input(
                        ring_buffers[ring_buffer_index],
                        ring_buffer_to_input_left_shifts[synapse_type_index]));

            // Clear ring buffer
            ring_buffers[ring_buffer_index] = 0;
        }
    }

    _print_inputs();

    // Re-enable the interrupts
    spin1_mode_restore(state);
}
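/* The bit layout produced by synapses_get_ring_buffer_index is
 * implementation-specific; the sketch below shows one plausible packing of
 * delay slot, synapse type and neuron index into a single word. The
 * EG_SYNAPSE_*_BITS constants and the function name are assumptions for
 * illustration, not the actual sPyNNaker definitions. */
#define EG_SYNAPSE_DELAY_BITS 4  // assumed: 16 delay slots in the ring
#define EG_SYNAPSE_TYPE_BITS  1  // assumed: 2 synapse types
#define EG_SYNAPSE_INDEX_BITS 8  // assumed: up to 256 neurons per core

static inline uint32_t eg_ring_buffer_index(
        uint32_t time, uint32_t synapse_type_index, uint32_t neuron_index)
{
    // the low bits of the timer tick select the current delay slot, so
    // successive ticks walk through the ring and wrap around
    uint32_t delay_slot = time & ((1 << EG_SYNAPSE_DELAY_BITS) - 1);
    return (delay_slot << (EG_SYNAPSE_TYPE_BITS + EG_SYNAPSE_INDEX_BITS))
         | (synapse_type_index << EG_SYNAPSE_INDEX_BITS)
         | neuron_index;
}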
/****f* spin1_api.c/spin1_send_mc_packet
*
* SUMMARY
*  This function enqueues a request to send a multicast packet. If the
*  software buffer is full then a failure code is returned. If both the comms
*  controller hardware buffer and the software buffer are empty then the
*  packet is sent immediately; otherwise it is placed in the queue to be
*  consumed later by the cc_tx_empty interrupt service routine.
*
* SYNOPSIS
*  uint spin1_send_mc_packet(uint key, uint data, uint load)
*
* INPUTS
*  uint key: packet routing key
*  uint data: packet payload
*  uint load: 0 = no payload (ignore data param), 1 = send payload
*
* OUTPUTS
*  1 if packet is enqueued or sent successfully, 0 otherwise
*
* SOURCE
*/
uint spin1_send_mc_packet(uint key, uint data, uint load)
{
  // TODO: This needs to be re-written for SpiNNaker using the
  // TX_nof_full flag instead -- much more efficient!

  uint rc = SUCCESS;

  uint cpsr = spin1_irq_disable ();

  /* clear sticky TX full bit and check TX state */
  cc[CC_TCR] = TX_TCR_MCDEFAULT;

  if (cc[CC_TCR] & TX_FULL_MASK)
  {
    if((tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE == tx_packet_queue.start)
    {
      /* if queue full cannot do anything -- report failure */
      rc = FAILURE;
      #if (API_WARN == TRUE) || (API_DIAGNOSTICS == TRUE)
        warnings |= PACKET_QUEUE_FULL;
        pfull++;
      #endif
    }
    else
    {
      /* if not full queue packet */
      tx_packet_queue.queue[tx_packet_queue.end].key = key;
      tx_packet_queue.queue[tx_packet_queue.end].data = data;
      tx_packet_queue.queue[tx_packet_queue.end].load = load;

      tx_packet_queue.end = (tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE;

      /* turn on tx_empty interrupt (in case it was off) */
      vic[VIC_ENABLE] = (1 << CC_TMT_INT);
    }
  }
  else
  {
    if((tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE == tx_packet_queue.start)
    {
      /* if queue full, dequeue and send packet at the */
      /* head of the queue to make room for new packet */
      uint hkey  = tx_packet_queue.queue[tx_packet_queue.start].key;
      uint hdata = tx_packet_queue.queue[tx_packet_queue.start].data;
      uint hload = tx_packet_queue.queue[tx_packet_queue.start].load;

      tx_packet_queue.start = (tx_packet_queue.start + 1) % TX_PACKET_QUEUE_SIZE;

      if (hload)
        cc[CC_TXDATA] = hdata;

      cc[CC_TXKEY]  = hkey;
    }

    if(tx_packet_queue.start == tx_packet_queue.end)
    {
      // If queue empty send packet
      if (load)
        cc[CC_TXDATA] = data;

      cc[CC_TXKEY]  = key;

      // turn off tx_empty interrupt (in case it was on)
      vic[VIC_DISABLE] = 0x1 << CC_TMT_INT;
    }
    else
    {
      /* if not empty queue packet */
      tx_packet_queue.queue[tx_packet_queue.end].key = key;
      tx_packet_queue.queue[tx_packet_queue.end].data = data;
      tx_packet_queue.queue[tx_packet_queue.end].load = load;

      tx_packet_queue.end = (tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE;
    }

  }

  spin1_mode_restore(cpsr);

  return rc;
}
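/* Usage sketch: a return of 0 (failure) only means the software queue had
 * no room at that instant, so a caller can back off and retry. WITH_PAYLOAD
 * and spin1_delay_us() come from spin1_api.h; the busy-wait policy and the
 * send_with_retry() helper are illustrative assumptions. */
void send_with_retry(uint key, uint data)
{
  while (spin1_send_mc_packet(key, data, WITH_PAYLOAD) == 0)
  {
    // queue full: give the cc_tx_empty ISR a moment to drain it
    spin1_delay_us(1);
  }
}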