Example No. 1
void OS::halt() {
  auto cycles_before = solo5_clock_monotonic();
#if defined(ARCH_x86)
  asm volatile("hlt");
#else
#warning "OS::halt() not implemented for selected arch"
#endif
  // Accumulate nanoseconds spent halted
  os_cycles_hlt += solo5_clock_monotonic() - cycles_before;
}
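The accumulated os_cycles_hlt counter can be related to total uptime (see Example No. 9) to estimate how much time the machine spends halted. An illustrative calculation, assuming the counter is visible from the calling code:

  // Rough percentage of time spent in hlt since boot (illustrative only)
  const double idle_pct = 100.0 * static_cast<double>(os_cycles_hlt)
                        / static_cast<double>(OS::uptime());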
Example No. 2
void OS::block()
{
  // Increment level
  blocking_level += 1;

  // Increment highest if applicable
  if (blocking_level > highest_blocking_level)
      highest_blocking_level = blocking_level;

  // Poll for I/O, blocking for at most 0.05 ms (now + 50 µs)
  int rc = solo5_poll(solo5_clock_monotonic() + 50000ULL);
  if (rc == 0) {
    // Timed out: service any expired timers
    Timers::timers_handler();
  } else {
    // I/O is possible: poll the first NIC
    for(auto& nic : hw::Devices::devices<hw::Nic>()) {
      nic->poll();
      break;
    }
  }

  // Decrement level
  blocking_level -= 1;
}
Example No. 3
void OS::event_loop()
{
  while (power_) {
    int rc;

    // add a global symbol here so we can quickly discard
    // event loop from stack sampling
    asm volatile(
    ".global _irq_cb_return_location;\n"
    "_irq_cb_return_location:" );

    // XXX: temporarily ALWAYS sleep for 0.5 ms. We should ideally ask Timers
    // for the next immediate timer to fire (the first from the "scheduled" list
    // of timers?)
    rc = solo5_poll(solo5_clock_monotonic() + 500000ULL); // now + 0.5 ms
    Timers::timers_handler();
    if (rc) {
      for(auto& nic : hw::Devices::devices<hw::Nic>()) {
        nic->poll();
        break;
      }
    }
  }


  MYINFO("Stopping service");
  Service::stop();

  MYINFO("Powering off");
  solo5_poweroff();
}
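The XXX comment above notes that the poll deadline should ideally come from the next scheduled timer rather than a fixed 0.5 ms. A minimal sketch of that idea, assuming a hypothetical Timers::next() helper that returns the nanoseconds until the next timer (0 when nothing is scheduled):

  // Deadline for solo5_poll(): the next timer, capped at the current 0.5 ms default
  static uint64_t next_poll_deadline()
  {
    const uint64_t fallback = 500000ULL;   // 0.5 ms, as used in the loop above
    const uint64_t next = Timers::next();  // hypothetical: ns until next timer, 0 if none
    const uint64_t wait = (next != 0 && next < fallback) ? next : fallback;
    return solo5_clock_monotonic() + wait;
  }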
Example No. 4
File: net.c Project: mato/solo5
bool solo5_yield(solo5_time_t deadline)
{
    struct sys_timespec ts;
    int rc;
    struct sys_pollfd fds[1];
    int nfds = 0;
    uint64_t now, timeout_nsecs;

    now = solo5_clock_monotonic();
    if (deadline <= now)
        timeout_nsecs = 0;
    else
        timeout_nsecs = deadline - now;

    ts.tv_sec = timeout_nsecs / NSEC_PER_SEC;
    ts.tv_nsec = timeout_nsecs % NSEC_PER_SEC;

    if (netfd >= 0) {
        fds[nfds].fd = netfd;
        fds[nfds].events = SYS_POLLIN;
        nfds++;
    }

    /* Block until network I/O is possible or the timeout expires,
     * restarting if the syscall is interrupted */
    do {
        rc = sys_ppoll(fds, nfds, &ts);
    } while (rc == SYS_EINTR);
    assert(rc >= 0);

    /* Non-zero rc means the network fd is readable */
    return rc;
}
Example No. 5
bool solo5_yield(uint64_t deadline)
{
    bool rc = false;
    /* Busy-wait, issuing 'pause', until data is pending or the deadline passes */
    do {
        if (muen_net_pending_data()) {
            rc = true;
            break;
        }
        __asm__ __volatile__("pause");
    } while (solo5_clock_monotonic() < deadline);

    if (muen_net_pending_data()) {
        rc = true;
    }

    return rc;
}
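Both solo5_yield() variants above take an absolute deadline on the monotonic clock and return whether network I/O became possible. A typical call site therefore adds a relative timeout to the current time, for example (illustrative only):

  // Wait up to 1 ms, waking early if network data becomes readable
  const uint64_t deadline = solo5_clock_monotonic() + 1000000ULL;  // now + 1 ms
  if (solo5_yield(deadline)) {
    // network I/O is possible: read packets here
  }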
Example No. 6
int solo5_poll(uint64_t until_nsecs)
{
    int rc = 0;

    /*
     * cpu_block() as currently implemented will only poll for the maximum time
     * the PIT can be run in "one shot" mode. Loop until either I/O is possible
     * or the desired time has been reached.
     */
    interrupts_disable();
    do {
        if (virtio_net_pkt_poll()) {
            rc = 1;
            break;
        }

        cpu_block(until_nsecs);
    } while (solo5_clock_monotonic() < until_nsecs);
    if (!rc)
        rc = virtio_net_pkt_poll();
    interrupts_enable();

    return rc;
}
Example No. 7
/*
 * Returns early if any interrupts are serviced, or if the requested delay is
 * too short. Must be called with interrupts disabled, will enable interrupts
 * "atomically" during idle loop.
 */
void cpu_block(uint64_t until) {
    uint64_t now, delta_ns;
    uint64_t delta_ticks;
    unsigned int ticks;
    int s;

    assert(spldepth > 0);

    /*
 * Return if called too late.  Doing so ensures that the time
     * delta is positive.
     */
    now = solo5_clock_monotonic();
    if (until <= now)
        return;

    /*
     * Compute delta in PIT ticks. Return if it is less than minimum safe
     * amount of ticks.  Essentially this will cause us to spin until
     * the timeout.
     */
    delta_ns = until - now;
    delta_ticks = mul64_32(delta_ns, pit_mult);
    if (delta_ticks < PIT_MIN_DELTA) {
        /*
         * Since we are "spinning", quickly enable interrupts in
         * the hopes that we might get new work and can do something
         * else than spin.
         */
        __asm__ __volatile__(
            "sti;\n"
            "nop;\n"    /* ints are enabled 1 instr after sti */
            "cli;\n");
        return;
    }

    /*
     * Program the timer to interrupt the CPU after the delay has expired.
     * Maximum timer delay is 65535 ticks.
     */
    if (delta_ticks > 65535)
        ticks = 65535;
    else
        ticks = delta_ticks;

    /*
     * Note that according to the Intel 82C54 datasheet, p12 the
     * interrupt is actually delivered in N + 1 ticks.
     */
    outb(TIMER_CNTR, (ticks - 1) & 0xff);
    outb(TIMER_CNTR, (ticks - 1) >> 8);

    /*
     * Wait for any interrupt. If we got an interrupt then
     * just return into the scheduler which will check if there is
     * work to do and send us back here if not.
     *
     * TODO: It would be more efficient for longer sleeps to be
     * able to distinguish if the interrupt was the PIT interrupt
     * and no other, but this will do for now.
     */
    s = spldepth;
    spldepth = 0;
    __asm__ __volatile__(
        "sti;\n"
        "hlt;\n"
        "cli;\n");
    spldepth = s;
}
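mul64_32() together with pit_mult converts a nanosecond delta into PIT ticks using fixed-point arithmetic. A sketch of how such a helper is commonly implemented (the real solo5 version may differ), assuming compiler support for 128-bit integers:

  // 64x32 -> 64-bit multiply keeping the upper 64 bits of the 96-bit product.
  // With mult = (ticks_per_second << 32) / NSEC_PER_SEC this approximates
  // ticks = delta_ns * ticks_per_second / 1e9.
  static inline uint64_t mul64_32(uint64_t delta_ns, uint32_t mult)
  {
    return (uint64_t)(((unsigned __int128)delta_ns * mult) >> 32);
  }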
Example No. 8
void OS::start(char* _cmdline, uintptr_t mem_size)
{
  // Initialize stdout handlers
  OS::add_stdout(&OS::default_stdout);

  PROFILE("");
  // Print a fancy header
  CAPTION("#include<os> // Literally");

  void* esp = get_cpu_esp();
  MYINFO("Stack: %p", esp);

  /// STATMAN ///
  /// initialize at 0x6000, 3 pages (0x3000 bytes) in size
  Statman::get().init(0x6000, 0x3000);

  OS::cmdline = _cmdline;

  // setup memory and heap end
  OS::memory_end_ = mem_size;
  OS::heap_max_ = OS::memory_end_;

  // Call global ctors
  PROFILE("Global constructors");
  __libc_init_array();


  PROFILE("Memory map");
  // Assign memory ranges used by the kernel
  auto& memmap = memory_map();
  MYINFO("Assigning fixed memory ranges (Memory map)");

  memmap.assign_range({0x500, 0x5fff, "solo5", "solo5"});
  memmap.assign_range({0x6000, 0x8fff, "Statman", "Statistics"});
  memmap.assign_range({0xA000, 0x9fbff, "Stack", "Kernel / service main stack"});
  memmap.assign_range({(uintptr_t)&_LOAD_START_, (uintptr_t)&_end,
        "ELF", "Your service binary including OS"});

  Expects(::heap_begin and heap_max_);
  // @note for security we don't want to expose this
  memmap.assign_range({(uintptr_t)&_end + 1, ::heap_begin - 1,
        "Pre-heap", "Heap randomization area"});

  uintptr_t span_max = std::numeric_limits<std::ptrdiff_t>::max();
  uintptr_t heap_range_max_ = std::min(span_max, heap_max_);

  MYINFO("Assigning heap");
  memmap.assign_range({::heap_begin, heap_range_max_,
        "Heap", "Dynamic memory", heap_usage });

  MYINFO("Printing memory map");
  for (const auto &i : memmap)
    INFO2("* %s",i.second.to_string().c_str());

  extern void __platform_init();
  __platform_init();

  MYINFO("Booted at monotonic_ns=%lld walltime_ns=%lld",
         solo5_clock_monotonic(), solo5_clock_wall());

  Solo5_manager::init();

  // We don't need a start or stop function in solo5.
  Timers::init(
    // timer start function
    [] (std::chrono::microseconds) {},
    // timer stop function
    [] () {});

  // Some tests are asserting there is at least one timer that is always ON
  // (the RTC calibration timer). Let's fake some timer so those tests pass.
  Timers::oneshot(std::chrono::hours(1000000), [] (auto) {});

  Timers::ready();
}
Example No. 9
// uptime in nanoseconds
RTC::timestamp_t OS::uptime()
{
  return solo5_clock_monotonic() - booted_at_;
}
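uptime() returns the nanoseconds elapsed since booted_at_ was sampled from the monotonic clock at boot, so converting it for display is a plain division, for example:

  // Illustrative: report uptime in whole seconds (ns -> s)
  const auto uptime_secs = OS::uptime() / 1000000000ULL;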