/****f* spin1_api.c/dispatch
*
* SUMMARY
 *  This function executes callbacks that are scheduled in response to events.
 *  Callbacks are executed first in order of priority and then, within a
 *  priority level, in the order in which they were enqueued.
*
 *  The dispatcher is the sole consumer of the scheduler queues and so can
 *  safely run with interrupts enabled. Note that deschedule(uint event_id)
 *  modifies the scheduler queues, which naturally influences the callbacks
 *  dispatched by this function, but never in a way that can move the
 *  processor into an invalid state, such as calling a NULL function.
*
 *  Upon emptying the scheduler queues the dispatcher goes into
 *  wait-for-interrupt mode.
*
 *  Potential hazard: it is possible that an event will occur, and result in
 *  a callback being scheduled, AFTER the last check on the scheduler queues
 *  and BEFORE the wait-for-interrupt call. In this case, the scheduled
 *  callback would not be handled until the next event occurs and causes the
 *  wait-for-interrupt call to return.
 *
 *  This hazard is avoided by calling wait for interrupt with interrupts
 *  disabled: any interrupt will still wake up the core, and interrupts are
 *  then re-enabled, allowing the core to respond to it.
*
* SYNOPSIS
*  void dispatch()
*
* SOURCE
*/
void dispatch()
{
  uint i;
  uint cpsr;
  task_queue_t *tq;
  volatile callback_t cback;

  // dispatch callbacks from queues until spin1_stop () or
  // spin1_kill () are called (run = 0)
  while (run)
  {
    i = 0;

    // disable interrupts to avoid concurrent
    // scheduler/dispatcher accesses to queues
    cpsr = spin1_int_disable ();

    while (run && i < (NUM_PRIORITIES-1))
    {
      tq = &task_queue[i];

      i++;  // prepare for next priority queue

      if(tq->start != tq->end)
      {
        cback = tq->queue[tq->start].cback;
        uint arg0 = tq->queue[tq->start].arg0;
        uint arg1 = tq->queue[tq->start].arg1;

        tq->start = (tq->start + 1) % TASK_QUEUE_SIZE;

        if(cback != NULL)
        {
          // run callback with interrupts enabled
          spin1_mode_restore (cpsr);
          cback (arg0, arg1);
          cpsr = spin1_int_disable ();

          // re-start examining queues at highest priority
          i = 0;
        }
      }
    }

    if (run)
    {
      // go to sleep with interrupts disabled to avoid hazard!
      // an interrupt will still wake up the dispatcher
      spin1_wfi ();
      spin1_mode_restore (cpsr);
    }
  }
}
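/* Usage sketch (illustrative, not part of spin1_api.c): callbacks reach
 * dispatch () via the scheduler after being registered with
 * spin1_callback_on (). The callback body and tick period below are
 * hypothetical; the API calls are the standard spin1 ones. Depending on
 * the API version, spin1_start () may take a synchronisation argument.
 */
void timer_callback (uint ticks, uint unused)
{
  // priority 1 => queued by the scheduler and executed by dispatch ()
  if (ticks >= 1000)
    spin1_stop ();  // sets run = 0 and terminates the dispatch loop
}

void app_start (void)
{
  spin1_set_timer_tick (1000);                        // tick every 1000 us
  spin1_callback_on (TIMER_TICK, timer_callback, 1);  // queueable priority
  spin1_start (SYNC_NOWAIT);                          // enters dispatch ()
}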
/****f* spin1_api.c/spin1_flush_tx_packet_queue
*
* SUMMARY
*  This function flushes the outbound packet queue by adjusting the
*  queue pointers to make it appear empty to the consumer.
*
* SYNOPSIS
*  void spin1_flush_tx_packet_queue()
*
* SOURCE
*/
void spin1_flush_tx_packet_queue()
{
  uint cpsr = spin1_irq_disable ();

  tx_packet_queue.start = tx_packet_queue.end;

  spin1_mode_restore(cpsr);
}
/****f* spin1_api.c/deschedule
*
* SUMMARY
*  This function deschedules all callbacks corresponding to the given event
*  ID. One use for this function is to effectively discard all received
*  packets which are yet to be processed by calling
*  deschedule(MC_PACKET_RECEIVED). Note that this function cannot guarantee that
*  all callbacks pertaining to the given event ID will be descheduled: once a
*  callback has been prepared for execution by the dispatcher it is immune to
*  descheduling and will be executed upon return to the dispatcher.
*
* SYNOPSIS
*  void deschedule(uint event_id)
*
* INPUTS
*  uint event_id: event ID of the callbacks to be descheduled
*
* SOURCE
*/
void deschedule(uint event_id)
{
  uint cpsr = spin1_irq_disable ();

  task_queue_t *tq = &task_queue[callback[event_id].priority-1];

  for(uint i = 0; i < TASK_QUEUE_SIZE; i++)
  {
    if(tq->queue[i].cback == callback[event_id].cback)
      tq->queue[i].cback = NULL;
  }

  spin1_mode_restore(cpsr);
}
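/* Usage sketch (illustrative): discarding received-but-unprocessed packets
 * as described above. MC_PACKET_RECEIVED is the standard spin1 event ID;
 * flush_rx_packets () is a hypothetical wrapper (the spin1 API offers a
 * similar helper, spin1_flush_rx_packet_queue ()). A callback already
 * claimed by the dispatcher will still run once.
 */
void flush_rx_packets (void)
{
  deschedule (MC_PACKET_RECEIVED);
}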
/****f* spin1_api.c/spin1_dma_transfer
*
* SUMMARY
 *  This function enqueues a DMA transfer request. Requests are consumed by
 *  dma_done_isr, which schedules a user callback with the ID of the
 *  completed transfer and starts the next queued transfer. If the DMA
 *  controller hardware buffer is not full (which, given the consumer
 *  operation, also implies that the request queue is empty) then the
 *  transfer is started immediately.
*
* SYNOPSIS
*  uint spin1_dma_transfer(uint tag, void *system_address, void *tcm_address,
*                          uint direction, uint length)
*
* INPUTS
 *  uint tag: user-provided tag, passed to the transfer-done callback
 *  void *system_address: system NoC address of the transfer
 *  void *tcm_address: processor TCM address of the transfer
 *  uint direction: 0 = transfer to TCM (read), 1 = transfer to system (write)
 *  uint length: length of transfer in bytes
*
* OUTPUTS
*   uint: 0 if the request queue is full, DMA transfer ID otherwise
*
* SOURCE
*/
uint spin1_dma_transfer (uint tag, void *system_address, void *tcm_address,
			 uint direction, uint length)
{
  uint id = 0;
  uint cpsr = spin1_int_disable ();

  uint new_end = (dma_queue.end + 1) % DMA_QUEUE_SIZE;

  if (new_end != dma_queue.start)
  {
    id = dma_id++;

    uint desc = DMA_WIDTH << 24 | DMA_BURST_SIZE << 21
      | direction << 19 | length;

    dma_queue.queue[dma_queue.end].id = id;
    dma_queue.queue[dma_queue.end].tag = tag;
    dma_queue.queue[dma_queue.end].system_address = system_address;
    dma_queue.queue[dma_queue.end].tcm_address = tcm_address;
    dma_queue.queue[dma_queue.end].description = desc;

    /* if dmac is available and dma_queue empty trigger transfer now */
    if(!(dma[DMA_STAT] & 4) && (dma_queue.start == dma_queue.end))
    {
      dma[DMA_ADRS] = (uint) system_address;
      dma[DMA_ADRT] = (uint) tcm_address;
      dma[DMA_DESC] = desc;
    }

    dma_queue.end = new_end;
  }
  else
  {
    #if (API_WARN == TRUE) || (API_DIAGNOSTICS == TRUE)
      warnings |= DMA_QUEUE_FULL;
      dfull++;
    #endif
  }

  spin1_mode_restore(cpsr);

  return id;
}
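/* Usage sketch (illustrative): reading a block from SDRAM into DTCM. The
 * buffer names and tag value are hypothetical; DMA_READ (0) and the call
 * signature follow the spin1 API. A return value of 0 means the software
 * request queue was full and the caller should retry.
 */
#define ROW_TAG 1

uint fetch_row (void *sdram_src, void *dtcm_buf, uint bytes)
{
  uint id = spin1_dma_transfer (ROW_TAG, sdram_src, dtcm_buf,
                                DMA_READ, bytes);

  // completion is signalled by a DMA_TRANSFER_DONE callback, which
  // receives the transfer ID and tag; id == 0 means the queue was full
  return id;
}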
/* Usage example: synapses_do_timestep_update (), from the sPyNNaker
 * synapse-processing code, shows the spin1 interrupt-control API in use:
 * interrupts are disabled with spin1_irq_disable () while the ring
 * buffers are read and cleared, and restored with spin1_mode_restore ().
 */
void synapses_do_timestep_update(timer_t time) {

    _print_ring_buffers(time);

    // Disable interrupts to stop DMAs interfering with the ring buffers
    uint32_t state = spin1_irq_disable();

    // Transfer the input from the ring buffers into the input buffers
    for (uint32_t neuron_index = 0; neuron_index < n_neurons;
            neuron_index++) {

        // Shape the existing input according to the included rule
        synapse_types_shape_input(input_buffers, neuron_index,
                neuron_synapse_shaping_params);

        // Loop through all synapse types
        for (uint32_t synapse_type_index = 0;
                synapse_type_index < SYNAPSE_TYPE_COUNT; synapse_type_index++) {

            // Get index in the ring buffers for the current timeslot for
            // this synapse type and neuron
            uint32_t ring_buffer_index = synapses_get_ring_buffer_index(
                time, synapse_type_index, neuron_index);

            // Convert ring-buffer entry to input and add on to correct
            // input for this synapse type and neuron
            synapse_types_add_neuron_input(input_buffers, synapse_type_index,
                    neuron_index, neuron_synapse_shaping_params,
                    synapses_convert_weight_to_input(
                        ring_buffers[ring_buffer_index],
                        ring_buffer_to_input_left_shifts[synapse_type_index]));

            // Clear ring buffer
            ring_buffers[ring_buffer_index] = 0;
        }
    }

    _print_inputs();

    // Re-enable the interrupts
    spin1_mode_restore(state);
}
/****f* spin1_api.c/clean_up
*
* SUMMARY
 *  This function is called after the simulation stops to configure the
 *  hardware for idle operation: it restores the interrupt vectors and
 *  clears or disables the remaining interrupt sources.
*
* SYNOPSIS
*  void clean_up ()
*
* SOURCE
*/
void clean_up ()
{
  uint cpsr = spin1_int_disable ();

  // Restore old SARK FIQ handler

  sark_vec->fiq_vec = old_vector;
  vic[VIC_SELECT] = old_select;
  vic[VIC_ENABLE] = old_enable;

  // Restore old SARK IRQ handler
  vic_controls[sark_vec->sark_slot] = 0x20 | CPU_INT;

  // timer1
  tc[T1_INT_CLR] = 1;   // clear possible interrupt

  // dma controller
  dma[DMA_GCTL] = 0;    // disable all IRQ sources
  dma[DMA_CTRL] = 0x3f; // Abort pending and active transfers
  dma[DMA_CTRL] = 0x0d; // clear possible transfer done and restart

  spin1_mode_restore (cpsr);
}
/****f* spin1_api.c/spin1_send_mc_packet
*
* SUMMARY
 *  This function enqueues a request to send a multicast packet. If the
 *  software buffer is full then a failure code is returned. If the comms
 *  controller hardware buffer and the software buffer are empty then the
 *  packet is sent immediately; otherwise it is placed in a queue to be
 *  consumed later by the cc_tx_empty interrupt service routine.
*
* SYNOPSIS
*  uint spin1_send_mc_packet(uint key, uint data, uint load)
*
* INPUTS
 *  uint key: packet routing key
*  uint data: packet payload
*  uint load: 0 = no payload (ignore data param), 1 = send payload
*
* OUTPUTS
*  1 if packet is enqueued or sent successfully, 0 otherwise
*
* SOURCE
*/
uint spin1_send_mc_packet(uint key, uint data, uint load)
{
  // TODO: This needs to be re-written for SpiNNaker using the
  // TX_not_full flag instead -- much more efficient!

  uint rc = SUCCESS;

  uint cpsr = spin1_irq_disable ();

  /* clear sticky TX full bit and check TX state */
  cc[CC_TCR] = TX_TCR_MCDEFAULT;

  if (cc[CC_TCR] & TX_FULL_MASK)
  {
    if((tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE == tx_packet_queue.start)
    {
      /* if queue full cannot do anything -- report failure */
      rc = FAILURE;
      #if (API_WARN == TRUE) || (API_DIAGNOSTICS == TRUE)
        warnings |= PACKET_QUEUE_FULL;
        pfull++;
      #endif
    }
    else
    {
      /* if not full queue packet */
      tx_packet_queue.queue[tx_packet_queue.end].key = key;
      tx_packet_queue.queue[tx_packet_queue.end].data = data;
      tx_packet_queue.queue[tx_packet_queue.end].load = load;

      tx_packet_queue.end = (tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE;

      /* turn on tx_empty interrupt (in case it was off) */
      vic[VIC_ENABLE] = (1 << CC_TMT_INT);
    }
  }
  else
  {
    if((tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE == tx_packet_queue.start)
    {
      /* if queue full, dequeue and send packet at the */
      /* head of the queue to make room for new packet */
      uint hkey  = tx_packet_queue.queue[tx_packet_queue.start].key;
      uint hdata = tx_packet_queue.queue[tx_packet_queue.start].data;
      uint hload = tx_packet_queue.queue[tx_packet_queue.start].load;

      tx_packet_queue.start = (tx_packet_queue.start + 1) % TX_PACKET_QUEUE_SIZE;

      if (hload)
	cc[CC_TXDATA] = hdata;

      cc[CC_TXKEY]  = hkey;
    }

    if(tx_packet_queue.start == tx_packet_queue.end)
    {
      // If queue empty send packet
      if (load)
	cc[CC_TXDATA] = data;

      cc[CC_TXKEY]  = key;

      // turn off tx_empty interrupt (in case it was on)
      vic[VIC_DISABLE] = 0x1 << CC_TMT_INT;
    }
    else
    {
      /* if not empty queue packet */
      tx_packet_queue.queue[tx_packet_queue.end].key = key;
      tx_packet_queue.queue[tx_packet_queue.end].data = data;
      tx_packet_queue.queue[tx_packet_queue.end].load = load;

      tx_packet_queue.end = (tx_packet_queue.end + 1) % TX_PACKET_QUEUE_SIZE;
    }

  }

  spin1_mode_restore(cpsr);

  return rc;
}
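/* Usage sketch (illustrative): sending a payload-less multicast packet and
 * retrying while the software queue is full. send_spike () and the back-off
 * period are hypothetical; NO_PAYLOAD (0) is the standard spin1 constant
 * and FAILURE matches the return code used above.
 */
void send_spike (uint key)
{
  while (spin1_send_mc_packet (key, 0, NO_PAYLOAD) == FAILURE)
  {
    spin1_delay_us (1);  // back off briefly, then retry
  }
}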