static void
read_counter (struct hw *me,
	      struct mn103tim *timers,
	      int timer_nr,
	      void *dest,
	      unsigned  nr_bytes)
{
  unsigned32 val;

  if ( NULL == timers->timer[timer_nr].event )
    {
      /* Timer is not counting, use value in base register. */
      if ( timer_nr == 6 )
	{
	  val = 0;  /* timer 6 is an up counter */
	}
      else
	{
	  val = timers->reg[timer_nr].base;
	}
    }
  else
    {
      if ( timer_nr == 6 )  /* timer 6 is an up counter. */
	{
	  val = hw_event_queue_time(me) - timers->timer[timer_nr].start;
	}
      else
	{
	  /* Ticks left = start time + divide ratio - current time.
	     The base register cannot be used here: it may be rewritten
	     while the timer is counting, and a new value only takes
	     effect on the counter at the next underflow.  */
	  
	  val = timers->timer[timer_nr].start + timers->timer[timer_nr].div_ratio
	    - hw_event_queue_time(me);
	}
    }

  switch (nr_bytes) {
  case 1:
    *(unsigned8 *)dest = val;
    break;
    
  case 2:
    *(unsigned16 *)dest = val;
    break;

  case 4:
    *(unsigned32 *)dest = val;
    break;

  default:
    hw_abort(me, "bad read size for reading counter");
  }
      
}
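The two branches above reduce to simple tick arithmetic. Below is a minimal standalone sketch, not part of the device model; the helper names are hypothetical, and now, start and div_ratio stand in for hw_event_queue_time (me) and the timer's start/div_ratio fields.

/* Hypothetical helpers illustrating the read_counter arithmetic.  */
static unsigned32
up_counter_value (unsigned32 now, unsigned32 start)
{
  /* Timer 6 counts up: report ticks elapsed since counting began.  */
  return now - start;
}

static unsigned32
down_counter_value (unsigned32 now, unsigned32 start, unsigned32 div_ratio)
{
  /* Down counters report ticks remaining until the next underflow.  */
  return start + div_ratio - now;
}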
static void
do_counter6_event (struct hw *me,
		  void *data)
{
  struct mn103tim *timers = hw_data(me);
  long timer_nr = (long) data;
  int next_timer;

  /* Check if counting is still enabled. */
  if ( (timers->reg[timer_nr].mode & count_mask) != 0 )
    {
      /* Generate an interrupt for the timer underflow (TIMERn_UFLOW). */
      hw_port_event (me, timer_nr, 1);

      /* Schedule next timeout.  */
      timers->timer[timer_nr].start = hw_event_queue_time(me);
      /* FIX: Check if div_ratio has changed and if it's now 0. */
      timers->timer[timer_nr].event
	= hw_event_queue_schedule (me, timers->timer[timer_nr].div_ratio,
				   do_counter6_event, (void *)timer_nr);
    }
  else
    {
      timers->timer[timer_nr].event = NULL;
    }

}
Example #3
static void
test_handler (struct hw *me,
	      void *data)
{
  int *n = data;
  if (*n != hw_event_queue_time (me))
    abort ();
  *n = -(*n);
}
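A hedged usage sketch for the handler above; the scheduling wrapper and its names are assumptions, not part of the original test. The caller records the expected expiry time and passes its address as the event data, so the handler can verify it fired at exactly that time and negate the value as a "seen" marker.

static int expected_time;   /* hypothetical: holds the expected expiry time */

static void
schedule_test (struct hw *me, int delta)
{
  /* Record when the event should fire, then schedule it DELTA ticks out.  */
  expected_time = (int) hw_event_queue_time (me) + delta;
  hw_event_queue_schedule (me, delta, test_handler, &expected_time);
}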
Example #4
static void
do_counter_value (struct hw *me,
		  hw_pal_device *pal,
		  const char *reg,
		  hw_pal_counter *counter,
		  unsigned32 *word,
		  unsigned nr_bytes)
{
  unsigned32 val;
  if (nr_bytes != 4)
    hw_abort (me, "%s - bad read size must be 4 bytes", reg);
  if (counter->delta != 0)
    val = (counter->start + counter->delta
	   - hw_event_queue_time (me));
  else
    val = 0;
  HW_TRACE ((me, "read - %s %ld", reg, (long) val));
  *word = H2BE_4 (val);
}
Example #5
static void
do_counter_event (struct hw *me,
		  void *data)
{
  hw_pal_counter *counter = (hw_pal_counter *) data;
  if (counter->periodic_p)
    {
      HW_TRACE ((me, "timer expired"));
      counter->start = hw_event_queue_time (me);
      hw_port_event (me, TIMER_PORT, 1);
      hw_event_queue_schedule (me, counter->delta, do_counter_event, counter);
    }
  else
    {
      HW_TRACE ((me, "countdown expired"));
      counter->delta = 0;
      hw_port_event (me, COUNTDOWN_PORT, 1);
    }
}
Example #6
static void
deliver_cris_interrupt (struct hw *me, void *data)
{
  struct cris_hw *crishw = hw_data (me);
  SIM_DESC simulator = hw_system (me);
  sim_cpu *cpu = STATE_CPU (simulator, 0);
  unsigned int intno = crishw->pending_vector;

  if (CPU_CRIS_DELIVER_INTERRUPT (cpu) (cpu, CRIS_INT_INT, intno))
    {
      crishw->pending_vector = 0;
      crishw->pending_handler = NULL;
      return;
    }

  {
    /* Bug workaround: at time T with a pending number of cycles N to
       process, if re-scheduling an event at time T+M, M < N,
       sim_events_process gets stuck at T (updating the "time" to
       before the event rather than after the event, or somesuch).

       Hacking this locally is thankfully easy: if we see the same
       simulation time, increase the number of cycles.  Do this every
       time we get here, until a new time is seen (supposedly unstuck
       re-delivery).  (Fixing in SIM/GDB source will hopefully then
       also be easier, having a tangible test-case.)  */
    static signed64 last_events_time = 0;
    static signed64 delta = 1;
    signed64 this_events_time = hw_event_queue_time (me);

    if (this_events_time == last_events_time)
      delta++;
    else
      {
        delta = 1;
        last_events_time = this_events_time;
      }

    crishw->pending_handler
      = hw_event_queue_schedule (me, delta, deliver_cris_interrupt, NULL);
  }
}
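The workaround above follows a small self-contained pattern: while the event-queue time refuses to advance, keep growing the re-schedule delay so repeated re-delivery attempts cannot pile up at the same tick. A minimal sketch with hypothetical names (next_retry_delta is not part of the simulator):

static signed64
next_retry_delta (signed64 now, signed64 *last_time, signed64 *delta)
{
  if (now == *last_time)
    ++*delta;           /* still stuck at the same simulated time */
  else
    {
      *delta = 1;       /* time moved on: reset to the minimum delay */
      *last_time = now;
    }
  return *delta;
}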
Example #7
static void
do_counter_write (struct hw *me,
		  hw_pal_device *pal,
		  const char *reg,
		  hw_pal_counter *counter,
		  const unsigned32 *word,
		  unsigned nr_bytes)
{
  if (nr_bytes != 4)
    hw_abort (me, "%s - bad write size must be 4 bytes", reg);
  if (counter->handler != NULL)
    {
      hw_event_queue_deschedule (me, counter->handler);
      counter->handler = NULL;
    }
  counter->delta = BE2H_4 (*word);
  counter->start = hw_event_queue_time (me);
  HW_TRACE ((me, "write - %s %ld", reg, (long) counter->delta));
  if (counter->delta > 0)
    hw_event_queue_schedule (me, counter->delta, do_counter_event, counter);
}
static void
do_counter_event (struct hw *me,
		  void *data)
{
  struct mn103tim *timers = hw_data(me);
  long timer_nr = (long) data;
  int next_timer;

  /* Check if counting is still enabled. */
  if ( (timers->reg[timer_nr].mode & count_mask) != 0 )
    {
      /* Generate an interrupt for the timer underflow (TIMERn_UFLOW). */

      /* The port event is raised on the port of the last cascaded timer.
	 This holds across the whole range 0..NR_REG_TIMERS because the
	 first 16-bit timer (timer 4) may not be configured as a cascaded
	 timer.  */
      for ( next_timer = timer_nr+1; next_timer < NR_REG_TIMERS; ++next_timer )
	{
	  if ( (timers->reg[next_timer].mode & clock_mask) != clk_cascaded )
	    {
	      break;
	    }
	}
      hw_port_event (me, next_timer-1, 1);

      /* Schedule next timeout.  */
      timers->timer[timer_nr].start = hw_event_queue_time(me);
      /* FIX: Check if div_ratio has changed and if it's now 0. */
      timers->timer[timer_nr].event
	= hw_event_queue_schedule (me, timers->timer[timer_nr].div_ratio,
				   do_counter_event, (void *)timer_nr);
    }
  else
    {
      timers->timer[timer_nr].event = NULL;
    }

}
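The port-selection loop above can be read in isolation: starting at the timer that just underflowed, walk forward over every timer whose clock source is "cascaded" and raise the event on the port of the last one found. A hedged sketch follows; the helper and its parameters are hypothetical, while clock_mask and clk_cascaded are the same constants this device model uses.

static int
last_cascaded_port (const unsigned8 *modes, int timer_nr, int nr_timers)
{
  int next_timer;
  /* Stop at the first timer that is not cascaded onto its predecessor.  */
  for (next_timer = timer_nr + 1; next_timer < nr_timers; ++next_timer)
    if ((modes[next_timer] & clock_mask) != clk_cascaded)
      break;
  return next_timer - 1;   /* port of the last timer in the cascade */
}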
static void
write_tm6md (struct hw *me,
	     struct mn103tim *timers,
	     unsigned_word address,
	     const void *source,
	     unsigned nr_bytes)
{
  unsigned8 mode_val0 = 0x00, mode_val1 = 0x00;
  unsigned32 div_ratio;
  long timer_nr = 6;

  unsigned_word offset = address - timers->block[0].base;
  
  if ((offset != 0x84 && nr_bytes > 1) || nr_bytes > 2 )
    {
      hw_abort (me, "Bad write size of %d bytes to TM6MD", nr_bytes);
    }

  if ( offset == 0x84 )  /* address of TM6MD */
    {
      /*  Fill in first byte of mode */
      mode_val0 = *(unsigned8 *)source;
      timers->tm6md0 = mode_val0;
    
      if ( ( mode_val0 & 0x26 ) != 0 )
	{
	  hw_abort(me, "Cannot write to bits 5, 3, and 2 of TM6MD");
	}
    }
  
  if ( offset == 0x85 || nr_bytes == 2 )
    {
      /*  Fill in second byte of mode */
      if ( nr_bytes == 2 )
	{
	  mode_val1 = *((unsigned8 *)source + 1);
	}
      else
	{
	  mode_val1 = *(unsigned8 *)source;
	}

      timers->tm6md1 = mode_val1;

      if ( ( mode_val1 & count_and_load_mask ) == count_and_load_mask )
	{
	  hw_abort(me, "Cannot load base reg and start counting simultaneously.");
	}
      if ( ( mode_val1 & bits0to2_mask ) != 0 )
	{
	  hw_abort(me, "Cannot write to bits 8 to 10 of TM6MD");
	}
    }

  if ( mode_val1 & count_mask )
    {
      /* - de-schedule any previous event. */
      /* - add new event to queue to start counting. */
      /* - assert that counter == base reg? */

      div_ratio = timers->tm6ca;  /* binary counter for timer 6 */
      timers->timer[timer_nr].div_ratio = div_ratio;
      if ( NULL != timers->timer[timer_nr].event )
	{
	  hw_event_queue_deschedule (me, timers->timer[timer_nr].event);
	  timers->timer[timer_nr].event = NULL;
	}

      if ( div_ratio > 0 )
	{
	  /* Set start time. */
	  timers->timer[timer_nr].start = hw_event_queue_time(me);
	  timers->timer[timer_nr].event
	    = hw_event_queue_schedule(me, div_ratio,
				      do_counter6_event,
				      (void *)(timer_nr)); 
	}
    }
  else
    {
      /* Turn off counting */
      if ( NULL != timers->timer[timer_nr].event )
	{
	  hw_event_queue_deschedule (me, timers->timer[timer_nr].event);
	  timers->timer[timer_nr].event = NULL;
	}
    }
}
static void
write_mode_reg (struct hw *me,
		struct mn103tim *timers,
		long timer_nr,
		const void *source,
		unsigned nr_bytes)
     /* for timers 0 to 5 */
{
  unsigned i;
  unsigned8 mode_val, next_mode_val;
  unsigned32 div_ratio;

  if ( nr_bytes != 1 )
    {
      hw_abort (me, "bad write size of %d bytes to TM%ldMD.", nr_bytes,
		timer_nr);
    }

  mode_val = *(unsigned8 *)source;
  timers->reg[timer_nr].mode = mode_val;
      
  if ( ( mode_val & count_and_load_mask ) == count_and_load_mask )
    {
      hw_abort(me, "Cannot load base reg and start counting simultaneously.");
    }
  if ( ( mode_val & bits2to5_mask ) != 0 )
    {
      hw_abort(me, "Cannot write to bits 2 to 5 of mode register");
    }

  if ( mode_val & count_mask )
    {
      /* - de-schedule any previous event. */
      /* - add new event to queue to start counting. */
      /* - assert that counter == base reg? */

      /* For cascaded timers, */
      if ( (mode_val & clock_mask) == clk_cascaded )
	{
	  if ( timer_nr == 0 || timer_nr == 4 )
	    {
	      hw_abort(me, "Timer %ld cannot be cascaded.", timer_nr);
	    }
	}
      else
	{
	  div_ratio = timers->reg[timer_nr].base;

	  /* Check for cascading. */
	  if ( timer_nr < NR_8BIT_TIMERS )
	    {
	      for ( i = timer_nr + 1; i <= 3; ++i ) 
		{
		  next_mode_val = timers->reg[i].mode;
		  if ( ( next_mode_val & clock_mask ) == clk_cascaded )
		    {
		      /* Check that CNE is on. */
		      if ( ( next_mode_val & count_mask ) == 0 ) 
			{
			  hw_abort (me, "cascaded timer not ready for counting");
			}
		      ASSERT(timers->timer[i].event == NULL);
		      ASSERT(timers->timer[i].div_ratio == 0);
		      div_ratio = div_ratio
			| (timers->reg[i].base << (8*(i-timer_nr)));
		    }
		  else
		    {
		      break;
		    }
		}
	    }
	  else
	    {
	      /* Mode register for a 16 bit timer */
	      next_mode_val = timers->reg[timer_nr+1].mode;
	      if ( ( next_mode_val & clock_mask ) == clk_cascaded )
		{
		  /* Check that CNE is on. */
		  if ( ( next_mode_val & count_mask ) == 0 ) 
		    {
		      hw_abort (me, "cascaded timer not ready for counting");
		    }
		  ASSERT(timers->timer[timer_nr+1].event == NULL);
		  ASSERT(timers->timer[timer_nr+1].div_ratio == 0);
		  div_ratio = div_ratio | (timers->reg[timer_nr+1].base << 16);
		}
	    }

	  timers->timer[timer_nr].div_ratio = div_ratio;

	  if ( NULL != timers->timer[timer_nr].event )
	    {
	      hw_event_queue_deschedule (me, timers->timer[timer_nr].event);
	      timers->timer[timer_nr].event = NULL;
	    }

	  if ( div_ratio > 0 )
	    {
	      /* Set start time. */
	      timers->timer[timer_nr].start = hw_event_queue_time(me);
	      timers->timer[timer_nr].event
		= hw_event_queue_schedule(me, div_ratio,
					  do_counter_event,
					  (void *)(timer_nr)); 
	    }
	}
    }
  else
    {
      /* Turn off counting */
      if ( NULL != timers->timer[timer_nr].event )
	{
	  ASSERT((timers->reg[timer_nr].mode & clock_mask) != clk_cascaded);
	  hw_event_queue_deschedule (me, timers->timer[timer_nr].event);
	  timers->timer[timer_nr].event = NULL;
	}
      else
	{
	  if ( (timers->reg[timer_nr].mode & clock_mask) == clk_cascaded )
	    {
	      ASSERT(timers->timer[timer_nr].event == NULL);
	    }
	}
      
    }

}
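The divide-ratio assembly in write_mode_reg is worth spelling out: each timer cascaded onto timer_nr contributes its base register as the next more significant byte of the combined ratio, so timer 0 with base 0x34 cascaded into timer 1 with base 0x12 yields a 16-bit divide ratio of 0x1234. A hedged sketch with a hypothetical helper, assuming the cascade has already been validated as the ASSERTs above require:

static unsigned32
cascade_div_ratio (const unsigned8 *bases, int timer_nr, int last_cascaded)
{
  unsigned32 div_ratio = bases[timer_nr];
  int i;
  /* Each cascaded timer supplies the next more significant byte.  */
  for (i = timer_nr + 1; i <= last_cascaded; ++i)
    div_ratio |= (unsigned32) bases[i] << (8 * (i - timer_nr));
  return div_ratio;
}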