Example #1
struct bendData *
getBendData(int element_center,
            enum hybridization centerHybridization,
            int element1,
            char bondOrder1,
            int element2,
            char bondOrder2)
{
  struct bendData *bend;
  char bendName[256]; // XXX Can overflow for long atom type names
  
  generateBendName(bendName, element_center, centerHybridization, element1, bondOrder1, element2, bondOrder2);
  bend = (struct bendData *)hashtable_get(bendDataHashtable, bendName);
  if (bend == NULL) {
    bend = generateGenericBendData(bendName, element_center, centerHybridization, element1, bondOrder1, element2, bondOrder2);
  }
  if (bend == NULL) {
    return NULL;
  }
  if (bend->parameterQuality < QualityWarningLevel && !bend->warned) {
    if (!ComputedParameterWarning) {
      WARNING("Using a reduced quality parameter, see the trace output for details");
      ComputedParameterWarning = 1;
    }
    INFO2("Using quality %d parameters for %s bend", bend->parameterQuality, bendName);
    INFO2("Computed kb: %e, theta0: %e", bend->kb, bend->theta0);
    bend->warned = 1;
  }
  return bend;
}
Example #2
  PCI_Device::PCI_Device(const uint16_t pci_addr, const uint32_t device_id)
      : pci_addr_{pci_addr}, device_id_{device_id}
  {
    // We have a device, so probe for details
    devtype_.reg = read_dword(pci_addr, PCI::CONFIG_CLASS_REV);

    //printf("\t[*] New PCI Device: Vendor: 0x%x Prod: 0x%x Class: 0x%x\n", 
    //device_id.vendor,device_id.product,classcode);
    
    INFO2("|");  
    
    switch (devtype_.classcode) {
    case PCI::BRIDGE:
      INFO2("+--+ %s %s (0x%x)",
            bridge_subclasses[devtype_.subclass < SS_BR ? devtype_.subclass : SS_BR-1],
            classcodes[devtype_.classcode],devtype_.subclass);
      break;

    case PCI::NIC:
      INFO2("+--+ %s %s (0x%x)",
            nic_subclasses[devtype_.subclass < SS_NIC ? devtype_.subclass : SS_NIC-1],
            classcodes[devtype_.classcode],devtype_.subclass);
      break;

    default:
      if (devtype_.classcode < NUM_CLASSCODES) {
        INFO2("+--+ %s ",classcodes[devtype_.classcode]);
      } else {
        INFO2("\t +--+ Other (Classcode 0x%x) \n",devtype_.classcode);
      } 
    } //< switch (devtype_.classcode)
  }
Example #3
// for the pure stop-the-world case
static void gc_partial_con_PSTW( GC *gc) {
  int64 time_collection_start = time_now();
  INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<"");
  INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) );
  // stop the world enumeration
  gc->num_collections++;
  int disable_count = hythread_reset_suspend_disable();  
  gc_set_rootset_type(ROOTSET_IS_REF);
  gc_prepare_rootset(gc);
   
  if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) {
      unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc);
      Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
      con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size;
  }
  
  //reclaim heap 
  gc_reset_mutator_context(gc);
  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
  gc_ms_reclaim_heap((GC_MS*)gc);

  //update live size
  gc_PSTW_update_stat_after_marking(gc);
  	
  // reset the collection and resume mutators
  gc_reset_after_con_collection(gc);

  set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed
  vm_resume_threads_after();
  assert(hythread_is_suspend_enabled());
  hythread_set_suspend_disable(disable_count);
}
Example #4
// for the case where concurrent marking has not finished before the heap is exhausted
static void gc_partial_con_PMSS(GC *gc) {
  INFO2("gc.con.info", "[PMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
  // wait concurrent marking finishes
  int64 wait_start = time_now();
  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at any time before the mutators are resumed
  // in the stop-the-world phase (only the collectors are running at the moment), so the spin lock costs little extra performance here
  while( gc->gc_concurrent_status == GC_CON_START_MARKERS || 
  	      gc->gc_concurrent_status == GC_CON_TRACING || 
  	      gc->gc_concurrent_status == GC_CON_TRACE_DONE) 
  {
      vm_thread_yield(); //let the unfinished marker run
  }

  /*just debugging*/
    gc_ms_get_current_heap_usage((GC_MS *)gc);
    int64 pause_time = time_now() - wait_start;
    INFO2("gc.con.info", "[PMSS]wait marking time="<<pause_time<<" us" );
    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
    unsigned int marking_time_shortage = (unsigned int)(con_collection_stat->marking_end_time - wait_start);
    INFO2("gc.con.info", "[PMSS] marking late time [" << marking_time_shortage << "] us" );
	    
  // start STW reclaiming heap
  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
  gc_reset_mutator_context(gc);
  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
  gc_ms_reclaim_heap((GC_MS*)gc);
  
  // reset after partial stop the world collection
  gc_reset_after_con_collection(gc);
  set_con_nil(gc);
}
Example #5
void PCI_manager::init()
{
  INFO("PCI Manager","Probing PCI bus");
  
  /* 
     Probe the PCI bus
     - Assuming bus number is 0, there are 255 possible addresses  
  */
  uint32_t id = PCI::WTF;
  
  for (uint16_t pci_addr = 0; pci_addr < 255; pci_addr++)
    {
      id = PCI_Device::read_dword(pci_addr, PCI::CONFIG_VENDOR);
      if (id != PCI::WTF)
        {
          PCI_Device dev (pci_addr, id);
          devices[dev.classcode()].emplace_back(dev);
        }
    }
  
  // Pretty printing, end of device tree
  // @todo should probably be moved, or optionally non-printed
  INFO2("|");
  INFO2("o");
  
}
Example #6
//just debugging
inline void gc_ms_get_current_heap_usage(GC_MS *gc)
{
  Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat(gc);
  unsigned int new_obj_size = gc_get_mutator_new_obj_size((GC *)gc);
  unsigned int current_size = con_collection_stat->surviving_size_at_gc_end + new_obj_size;
  INFO2("gc.con.scheduler", "[Heap Usage]surviving_size("<<con_collection_stat->surviving_size_at_gc_end<<")+new_obj_size("<<new_obj_size << ")="<<current_size<<" bytes");
  INFO2("gc.con.scheduler", "[Heap Usage]usage rate ("<< (float)current_size/gc->committed_heap_size<<")");
}
Example #7
int alert_push(int event, const char *msg)
{
	/* Callback entry point for logging and debugging */

	switch(event){
		case ERR_BASE:
		case ERR_VERIFY:
		case ERR_BRIDGE:
		case ERR_PORT:
		case ERR_CTRL_BRIDGE:
		case ERR_CTRL_PORT:
			ERROR2(GLOBAL_OUT_GROUP,"%s\n", msg);
			break;
		case ERR_MSTI:
			ERROR2(MSTI_GROUP,"%s\n", msg);
			break;
		case ALERT_BASE:
		case ALERT_BRIDGE:
		case ALERT_PORT:
			break;
		case INFO_BRIDGE_TXBPDU:
		case INFO_PORT_TXBPDU:
			INFO2(BPDU_TX_GROUP,"%s\n", msg);
			break;
		case DEBUG_TXBPDU:
			INFO3(BPDU_TX_GROUP,"%s\n", msg);
			break;
		case INFO_BRIDGE_RXBPDU:
		case INFO_PORT_RXBPDU:
			INFO2(BPDU_RX_GROUP,"%s\n", msg);
			break;
		case DEBUG_RXBPDU:
			INFO3(BPDU_RX_GROUP,"%s\n", msg);
			break;
		case INFO_MSTI:
			INFO2(MSTI_GROUP,"%s\n", msg);
			break;
		case INFO_BASE:
		case INFO_BRIDGE:
		case INFO_PORT:
		case INFO_CTRL_BRIDGE:
		case INFO_CTRL_PORT:
		case INFO_CTRL_BRIDGE_MAC:
			INFO2(GLOBAL_OUT_GROUP,"%s\n", msg);
			break;
		case DEBUG_INFO:
			break;
		case NOTIFY_MSG:
			send_notify(event, msg);
			break;
		default:
			break;
	};


	return 0;
}
Example #8
void gc_PSTW_update_stat_after_marking(GC *gc)
{
  unsigned int size_live_obj = gc_ms_get_live_object_size((GC_MS*)gc);
  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
  con_collection_stat->live_size_marked = size_live_obj;
  con_collection_stat->alloc_size_before_alloc_live = gc_get_mutator_new_obj_size(gc);
  	
  INFO2("gc.con.scheduler", "[Mark Finish] live_marked:      "<<con_collection_stat->live_size_marked<<" bytes");
  INFO2("gc.con.scheduler", "[Mark Finish] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
  INFO2("gc.con.scheduler", "[Mark Finish] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
}
Example #9
  void PCI_Device::probe_resources() noexcept {
    // Find resources on this PCI device (scan the BARs)
    uint32_t value {PCI::WTF};
  
    uint32_t reg {0};
    uint32_t len {0};

    for(int bar {0}; bar < 6; ++bar) {
      //Read the current BAR register 
      reg = PCI::CONFIG_BASE_ADDR_0 + (bar << 2);
      value = read_dword(reg);

      if (!value) continue;

      //Write all 1's to the register, to get the length value (osdev)
      write_dword(reg, 0xFFFFFFFF);
      len = read_dword(reg);
    
      //Put the value back
      write_dword(reg, value);
    
      uint32_t unmasked_val  {0};
      uint32_t pci__size     {0};

      if (value & 1) {  // Resource type IO

        unmasked_val = value & PCI::BASE_ADDRESS_IO_MASK;
        pci__size = pci_size(len, PCI::BASE_ADDRESS_IO_MASK & 0xFFFF);
      
        // Add it to resource list
        add_resource(new Resource(unmasked_val, pci__size), res_io_);
        assert(res_io_ != nullptr);        
      
      } else { //Resource type Mem

        unmasked_val = value & PCI::BASE_ADDRESS_MEM_MASK;
        pci__size = pci_size(len, PCI::BASE_ADDRESS_MEM_MASK);

        //Add it to resource list
        add_resource(new Resource(unmasked_val, pci__size), res_mem_);
        assert(res_mem_ != nullptr);
      }

      INFO2("");
      INFO2("[ Resource @ BAR %i ]", bar);
      INFO2("  Address:  0x%x Size: 0x%x", unmasked_val, pci__size);
      INFO2("  Type: %s", value & 1 ? "IO Resource" : "Memory Resource");   
    }
  
    INFO2("");
  }
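The sizing arithmetic above follows the usual BAR probing recipe: write all 1s, read the value back, mask off the type bits, invert, and add one. A minimal sketch of a pci_size helper along those lines, assuming the osdev recipe rather than the project's actual implementation:

  // Sketch only: turn the value read back after writing 0xFFFFFFFF into a region length.
  static uint32_t pci_size(uint32_t readback, uint32_t mask) {
    uint32_t masked = readback & mask;  // drop the read-only flag/type bits
    return ~masked + 1;                 // two's complement gives the size in bytes
  }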
Example #10
  void ACPI::walk_madt(const char* addr)
  {
    auto* hdr = (MADTHeader*) addr;
    INFO("ACPI", "Reading APIC information");
    // the base address for APIC registers
    INFO2("LAPIC base: 0x%x  (flags: 0x%x)",
        hdr->lapic_addr, hdr->flags);
    this->apic_base = (uintptr_t) hdr->lapic_addr;

    // the length remaining after MADT header
    int len = hdr->hdr.Length - sizeof(MADTHeader);
    // start walking
    const char* ptr = (char*) hdr->record;

    while (len) {
      auto* rec = (MADTRecord*) ptr;
      switch (rec->type) {
      case 0:
        {
          auto& lapic = *(LAPIC*) rec;
          lapics.push_back(lapic);
          //INFO2("-> CPU %u ID %u  (flags=0x%x)",
          //    lapic.cpu, lapic.id, lapic.flags);
        }
        break;
      case 1:
        {
          auto& ioapic = *(IOAPIC*) rec;
          ioapics.push_back(ioapic);
          INFO2("I/O APIC %u   ADDR 0x%x  INTR 0x%x",
              ioapic.id, ioapic.addr_base, ioapic.intr_base);
        }
        break;
      case 2:
        {
          auto& redirect = *(override_t*) rec;
          overrides.push_back(redirect);
          INFO2("IRQ redirect for bus %u from IRQ %u to VEC %u",
              redirect.bus_source, redirect.irq_source, redirect.global_intr);
        }
        break;
      default:
        debug("Unrecognized ACPI MADT type: %u\n", rec->type);
      }
      // decrease length as we go
      len -= rec->length;
      // go to next entry
      ptr += rec->length;
    }
    INFO("SMP", "Found %u APs", (uint32_t) lapics.size());
  }
Example #11
NativeCodePtr compile_me(Method* method)
{
    ASSERT_RAISE_AREA;
    ASSERT_NO_INTERPRETER;
    TRACE("compile_me " << method);

    GcFrame gc;
    compile_protect_arguments(method, &gc);

    if (exn_raised()) {
        return NULL;
    }

    tmn_suspend_enable();
    if (method->is_abstract()) { 
        compile_raise_exception("java/lang/AbstractMethodError", "", method); 
        tmn_suspend_disable(); 
        return NULL; 
    }

    DebugUtilsTI *ti = VM_Global_State::loader_env->TI;
    JIT_Result res = compile_do_compilation(method);

    if (res != JIT_SUCCESS) {
        INFO2("compile", "Cannot compile " << method);
        if (!exn_raised()) {
            compile_raise_exception("java/lang/InternalError", "Cannot compile ", method);
        }
        tmn_suspend_disable();
        return NULL;
    }
    tmn_suspend_disable();

    NativeCodePtr entry_point = method->get_code_addr();
    INFO2("compile.code", "Compiled method " << method
            << ", entry " << entry_point);

    if (method->get_pending_breakpoints() != 0)
        jvmti_set_pending_breakpoints(method);
    if(ti->isEnabled() && ti->is_single_step_enabled()
        && !method->is_native())
    {
        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
        assert(jvmti_thread);
        jvmti_set_single_step_breakpoints_for_method(ti, jvmti_thread, method);
    }

    return entry_point;
} // compile_me
Example #12
static int command_move_clients (client_t *client, source_t *source, int response)
{
    const char *dest_source;
    xmlDocPtr doc;
    xmlNodePtr node;
    int parameters_passed = 0;
    char buf[255];

    if((COMMAND_OPTIONAL(client, "destination", dest_source))) {
        parameters_passed = 1;
    }
    if (!parameters_passed) {
        doc = admin_build_sourcelist(source->mount);
        thread_mutex_unlock (&source->lock);
        return admin_send_response(doc, client, response, "moveclients.xsl");
    }
    INFO2 ("source is \"%s\", destination is \"%s\"", source->mount, dest_source);

    doc = xmlNewDoc(XMLSTR("1.0"));
    node = xmlNewDocNode(doc, NULL, XMLSTR("iceresponse"), NULL);
    xmlDocSetRootElement(doc, node);

    source_set_fallback (source, dest_source);
    source->termination_count = source->listeners;
    source->flags |= SOURCE_LISTENERS_SYNC;

    snprintf (buf, sizeof(buf), "Clients moved from %s to %s",
            source->mount, dest_source);
    thread_mutex_unlock (&source->lock);
    xmlNewChild(node, NULL, XMLSTR("message"), XMLSTR(buf));
    xmlNewChild(node, NULL, XMLSTR("return"), XMLSTR("1"));

    return admin_send_response (doc, client, response, "response.xsl");
}
Example #13
void IRQ_manager::bsp_init()
{
  const auto WORDS_PER_BMP = IRQ_LINES / 32;
  auto* bmp = new MemBitmap::word[WORDS_PER_BMP * 3]();
  irq_subs.set_location(bmp + 0 * WORDS_PER_BMP, WORDS_PER_BMP);
  irq_pend.set_location(bmp + 1 * WORDS_PER_BMP, WORDS_PER_BMP);
  irq_todo.set_location(bmp + 2 * WORDS_PER_BMP, WORDS_PER_BMP);

  // Create the IDT register descriptor used by the 'lidt' instruction
  idt_loc idt_reg;
  idt_reg.limit = INTR_LINES * sizeof(IDTDescr) - 1;
  idt_reg.base = (uint32_t)idt;

  INFO("INTR", "Creating interrupt handlers");

  for (size_t i = 0; i < 32; i++) {
    create_gate(&idt[i],exception_handler,default_sel,default_attr);
  }

  // Set all interrupt-gates >= 32 to unused do-nothing handler
  for (size_t i = 32; i < INTR_LINES; i++) {
    create_gate(&idt[i],unused_interrupt_handler,default_sel,default_attr);
  }

  // spurious interrupts (should not happen)
  // currently set to be the last interrupt vector
  create_gate(&idt[INTR_LINES-1], spurious_intr, default_sel, default_attr);

  INFO2("+ Default interrupt gates set for irq >= 32");

  // Load IDT
  asm volatile ("lidt %0": :"m"(idt_reg) );
}
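For reference, the idt_loc operand handed to 'lidt' above is normally a packed pair of a 16-bit limit and a 32-bit base; a sketch of the assumed layout (not the project's actual declaration):

  // Assumed 32-bit protected-mode IDT register layout.
  struct idt_loc {
    uint16_t limit;   // size of the IDT in bytes, minus one
    uint32_t base;    // linear address of the first descriptor
  } __attribute__((packed));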
Example #14
 uint16_t msix_t::setup_vector(uint8_t cpu, uint8_t intr)
 {
   // find free table entry
   uint16_t vec;
   for (vec = 0; vec < this->vectors(); vec++)
   {
     // read data register
     auto reg = get_entry(vec, ENT_MSG_DATA);
     if ((mm_read(reg) & 0xff) == 0) break;
   }
   assert (vec != this->vectors());
   
   // use free table entry
   INFO2("MSI-X vector %u pointing to cpu %u intr %u",
       vec, cpu, intr);
   
   // mask entry
   mask_entry(vec);
   
   mm_write(get_entry(vec, ENT_MSG_ADDR), msix_addr_single_cpu(cpu));
   mm_write(get_entry(vec, ENT_MSG_UPPER), 0x0);
   mm_write(get_entry(vec, ENT_MSG_DATA), msix_data_single_vector(intr));
   
   // unmask entry
   unmask_entry(vec);
   // return it
   return vec;
 }
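The msix_addr_single_cpu() and msix_data_single_vector() helpers are not shown here; a hedged sketch of what they typically compute under the standard x86 MSI encoding (destination APIC ID in address bits 12..19, vector number in the low byte of the data word). The project's real versions may differ:

 // Sketch only, based on the standard x86 MSI address/data layout.
 static inline uint32_t msix_addr_single_cpu(uint8_t cpu) {
   return 0xFEE00000u | (uint32_t(cpu) << 12);  // fixed base plus destination APIC ID
 }
 static inline uint32_t msix_data_single_vector(uint8_t intr) {
   return intr;  // vector in bits 0..7, fixed delivery mode, edge-triggered
 }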
Example #15
static void queue_auth_client (auth_client *auth_user, mount_proxy *mountinfo)
{
    auth_t *auth;

    if (auth_user == NULL)
        return;
    auth_user->next = NULL;
    if (mountinfo)
    {
        auth = mountinfo->auth;
        thread_mutex_lock (&auth->lock);
        if (auth_user->client)
            auth_user->client->auth = auth;
        auth->refcount++;
    }
    else
    {
        if (auth_user->client == NULL || auth_user->client->auth == NULL)
        {
            WARN1 ("internal state is incorrect for %p", auth_user->client);
            return;
        }
        auth = auth_user->client->auth;
        thread_mutex_lock (&auth->lock);
    }
    DEBUG2 ("...refcount on auth_t %s is now %d", auth->mount, auth->refcount);
    *auth->tailp = auth_user;
    auth->tailp = &auth_user->next;
    auth->pending_count++;
    INFO2 ("auth on %s has %d pending", auth->mount, auth->pending_count);
    thread_mutex_unlock (&auth->lock);
}
Example #16
struct bondStretch *
getBondStretch(int element1, int element2, char bondOrder)
{
  struct bondStretch *entry;
  char bondName[256]; // XXX Can overflow for long atom type names

  entry = getBondStretchEntry(element1, element2, bondOrder);
  if (entry->parameterQuality < QualityWarningLevel && !entry->warned) {
    if (!ComputedParameterWarning) {
      WARNING("Using a reduced quality parameter, see the trace output for details");
      ComputedParameterWarning = 1;
    }
    generateBondName(bondName, element1, element2, bondOrder);
    INFO2("Using quality %d parameters for %s stretch", entry->parameterQuality, bondName);
    INFO4("Computed ks: %e, r0: %e, de: %e, beta: %e",
          entry->ks,
          entry->r0,
          entry->de,
          entry->beta);
    entry->warned = 1;
  }
  if (entry->maxPhysicalTableIndex == -1) {
    // Only call initializeBondStretchInterpolater when we're actually
    // going to use it.  That way, we don't warn about too large of an
    // ExcessiveEnergyLevel
    initializeBondStretchInterpolater(entry);
  }
  return entry;
}
Example #17
/* FIXME: the collection should be separated from the allocation */
void *wspace_alloc(unsigned size, Allocator *allocator)
{
  void *p_obj = NULL;
  /*
  if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[BEFORE ALLOC]hyper free chunk is EMPTY!!");
  */
  
  if(gc_is_specify_con_gc())
    gc_sched_collection(allocator->gc, GC_CAUSE_CONCURRENT_GC);
  
  /* First, try to allocate object from TLB (thread local chunk) */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER FIRST ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }
  
  if(allocator->gc->in_collection) return NULL;
  
  vm_gc_lock_enum();
  /* after acquiring the lock, check whether another thread has already collected */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    vm_gc_unlock_enum();
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER SECOND ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }
  
  INFO2("gc.con.info", "[Exhausted Cause] Allocation size is :" << size << " bytes");
  GC *gc = allocator->gc;
  /*
  gc->cause = GC_CAUSE_MOS_IS_FULL;
  if(gc_is_specify_con_gc())
    gc_relaim_heap_con_mode(gc);
  else*/ 
  gc_reclaim_heap(gc, GC_CAUSE_MOS_IS_FULL);
  vm_gc_unlock_enum();

#ifdef SSPACE_CHUNK_INFO
  printf("Failure size: %x\n", size);
#endif

  p_obj = wspace_try_alloc(size, allocator);
  /*
  if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER COLLECTION ALLOC]hyper free chunk is EMPTY!!");
  */
  if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
  
  return p_obj;
}
Example #18
int
mm_open(mm_file *mf, const char *fname)
{
    assert(mf);
    assert(fname);
    INFO2("opening file `%s'", fname);
    return mm_open_fp(mf, fopen(fname, "rb"));
}
Example #19
 inline static void disable_irq(const uint8_t irq) noexcept {
   irq_mask_ |= (1 << irq);
   if ((irq_mask_ & 0xFF00) == 0xFF00) {
     irq_mask_ |= (1 << 2);
   }
   set_intr_mask(irq_mask_);
   INFO2("- Disabling IRQ %i, mask: 0x%x", irq, irq_mask_);
 }
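A matching enable path would clear the same bits; a sketch mirroring the helper above, assuming the same irq_mask_ state and set_intr_mask() routine:

 inline static void enable_irq(const uint8_t irq) noexcept {
   irq_mask_ &= ~(1 << irq);
   if ((irq_mask_ & 0xFF00) != 0xFF00) {
     irq_mask_ &= ~(1 << 2);  // keep the cascade line (IRQ 2) open while any slave IRQ is unmasked
   }
   set_intr_mask(irq_mask_);
   INFO2("+ Enabling IRQ %i, mask: 0x%x", irq, irq_mask_);
 }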
Example #20
void PCI_manager::init() {
  INFO("PCI Manager", "Probing PCI bus");

  /*
   * Probe the PCI bus
   * - Assuming bus number is 0, there are 255 possible addresses
   */
  uint32_t id {PCI::WTF};

  for (uint16_t pci_addr {0}; pci_addr < 255; ++pci_addr) {
    id = hw::PCI_Device::read_dword(pci_addr, PCI::CONFIG_VENDOR);

    if (id != PCI::WTF) {
      hw::PCI_Device dev {pci_addr, id};

      // store device
      devices_[dev.classcode()].emplace_back(dev);

      bool registered = false;
      // translate classcode to device and register
      switch(dev.classcode())
      {
        case PCI::STORAGE:
          registered = register_device<hw::Drive>(dev);
          break;

        case PCI::NIC:
          registered = register_device<hw::Nic>(dev);
          break;

        default:
          break;
      }
      debug("Device %s", registered ? "registed":"not registered");
    }
  }

  // Pretty printing, end of device tree
  // @todo should probably be moved, or optionally non-printed
  INFO2("|");
  INFO2("o");
}
Example #21
/* function to handle the re-populating of the avl tree containing IP addresses
 * for deciding whether a connection of an incoming request is to be dropped.
 */
static void recheck_ip_file (cache_file_contents *cache)
{
    time_t now = time(NULL);
    if (now >= cache->file_recheck)
    {
        struct stat file_stat;
        FILE *file = NULL;
        int count = 0;
        avl_tree *new_ips;
        char line [MAX_LINE_LEN];

        cache->file_recheck = now + 10;
        if (cache->filename == NULL)
        {
            if (cache->contents)
            {
                avl_tree_free (cache->contents, free_filtered_ip);
                cache->contents = NULL;
            }
            return;
        }
        if (stat (cache->filename, &file_stat) < 0)
        {
            WARN2 ("failed to check status of \"%s\": %s", cache->filename, strerror(errno));
            return;
        }
        if (file_stat.st_mtime == cache->file_mtime)
            return; /* common case, no update to file */

        cache->file_mtime = file_stat.st_mtime;

        file = fopen (cache->filename, "r");
        if (file == NULL)
        {
            WARN2("Failed to open file \"%s\": %s", cache->filename, strerror (errno));
            return;
        }

        new_ips = avl_tree_new (compare_ip, NULL);

        while (get_line (file, line, MAX_LINE_LEN))
        {
            char *str;
            if(!line[0] || line[0] == '#')
                continue;
            count++;
            str = strdup (line);
            if (str)
                avl_insert (new_ips, str);
        }
        fclose (file);
        INFO2 ("%d entries read from file \"%s\"", count, cache->filename);

        if (cache->contents) avl_tree_free (cache->contents, free_filtered_ip);
        cache->contents = new_ips;
    }
}
Example #22
/*! \fn printVector
 *
 *  \param [in]  [vector]
 *  \param [in]  [size]
 *  \param [in]  [logLevel]
 *  \param [in]  [name]
 *
 *  \author wbraun
 */
static void printVector(const double *vector, integer *size, const int logLevel, const char *name)
{
  int i;

  INFO1(logLevel, "%s", name);
  INDENT(logLevel);
  for(i=0; i<*size; i++)
    INFO2(logLevel, "[%d] %10g", i, vector[i]);
  RELEASE(logLevel);
}
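A hypothetical call site for the routine above (the variable names and level value are illustrative, not taken from the original source):

  double x[3] = {1.0, 2.0, 3.0};
  integer n = 3;          /* 'integer' is the Fortran-style typedef used in the signature */
  int logLevel = 0;       /* whatever stream/level id the caller already uses */
  printVector(x, &n, logLevel, "x");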
Example #23
inline static void partial_stop_the_world_info( unsigned int type, unsigned int pause_time ) {
  switch( type ) {
    case GC_PARTIAL_PSTW :
      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PSTW=" << pause_time << " us");
      break;
    case GC_PARTIAL_PMSS :
      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), PMSS=" << pause_time << " us");
      break;
    case GC_PARTIAL_CMPS :
      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMPS=" << pause_time << " us");
      break;
    case GC_PARTIAL_CMSS :
      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), CMSS=" << pause_time << " us");
      break;
    case GC_PARTIAL_FCSR :
      INFO2("gc.con.time","[PT] pause ( Heap exhuasted ), FCSR=" << pause_time << " us");
      break;
  }
}
Example #24
void elf_protect_symbol_areas()
{
  char* src = (char*) parser.symtab.base;
  ptrdiff_t size = &parser.strtab.base[parser.strtab.size] - src;
  if (size & 4095) size += 4096 - (size & 4095);
  INFO2("* Protecting syms %p to %p (size %#zx)",
        src, &parser.strtab.base[parser.strtab.size], size);

  //os::mem::protect((uintptr_t) src, size, os::mem::Access::read);
}
Example #25
void OS::multiboot(uint32_t boot_magic, uint32_t boot_addr){
  MYINFO("Booted with multiboot");
  INFO2("* magic value: 0x%x Multiboot info at 0x%x", boot_magic, boot_addr);

  multiboot_info_t* bootinfo = (multiboot_info_t*) boot_addr;

  if (!(bootinfo->flags & MULTIBOOT_INFO_MEMORY)) {
    INFO2("* No memory info provided in multiboot info");
    return;
  }

  uint32_t mem_low_start = 0;
  uint32_t mem_low_end = (bootinfo->mem_lower * 1024) - 1;
  uint32_t mem_low_kb = bootinfo->mem_lower;
  uint32_t mem_high_start = 0x100000;
  uint32_t mem_high_end = mem_high_start + (bootinfo->mem_upper * 1024) - 1;
  uint32_t mem_high_kb = bootinfo->mem_upper;

  OS::low_memory_size_ = mem_low_kb * 1024;
  OS::high_memory_size_ = mem_high_kb * 1024;

  INFO2("* Valid memory (%i Kib):", mem_low_kb + mem_high_kb);
  INFO2("\t 0x%08x - 0x%08x (%i Kib)",
        mem_low_start, mem_low_end, mem_low_kb);
  INFO2("\t 0x%08x - 0x%08x (%i Kib)",
        mem_high_start, mem_high_end, mem_high_kb);
  INFO2("");

  if (bootinfo->flags & MULTIBOOT_INFO_CMDLINE) {
    os_cmdline = (char*) bootinfo->cmdline;
    INFO2("* Booted with parameters: %s", os_cmdline.c_str());
  }

  if (bootinfo->flags & MULTIBOOT_INFO_MEM_MAP) {
    INFO2("* Multiboot provided memory map  (%i entries)",bootinfo->mmap_length / sizeof(multiboot_memory_map_t));
    gsl::span<multiboot_memory_map_t> mmap { reinterpret_cast<multiboot_memory_map_t*>(bootinfo->mmap_addr),
        (int)(bootinfo->mmap_length / sizeof(multiboot_memory_map_t))};

    for (auto map : mmap) {
      const char* str_type = map.type & MULTIBOOT_MEMORY_AVAILABLE ? "FREE" : "RESERVED";
      INFO2("\t 0x%08llx - 0x%08llx %s (%llu Kb.)",
            map.addr, map.addr + map.len - 1, str_type, map.len / 1024 );
      /*if (map.addr + map.len > mem_high_end)
        break;*/
    }
    printf("\n");
  }
}
Example #26
void gc_con_stat_information_out(GC *gc)
{
  Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
  INFO2("gc.con.scheduler", "[Reset] surviving_at_end:       "<<con_collection_stat->surviving_size_at_gc_end<<" bytes");
  INFO2("gc.con.scheduler", "[Reset] alloc_rate:      "<<con_collection_stat->alloc_rate<<" b/ms");
  INFO2("gc.con.scheduler", "[Reset] utilization_rate:      "<<con_collection_stat->heap_utilization_rate);
  INFO2("gc.con.scheduler", "[Reset] trace_rate:      "<<con_collection_stat->trace_rate<<" b/ms");
  INFO2("gc.con.scheduler", "[Reset] sweeping time:      "<<con_collection_stat->sweeping_time<<" us");
  INFO2("gc.con.scheduler", "[Reset] gc time:      "<< trans_time_unit(con_collection_stat->gc_end_time - con_collection_stat->gc_start_time) );
  INFO2("gc.con.scheduler","=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=");
}
Example #27
/** Find and open file, if found return full path.
 * Caller is responsible for freeing the memory.
 */
char*
locate_file(const char *name, int type)
{
	file f = try_find_file(name, "rb", type);
	if (f.handle)
	{
		INFO2("found file `%s'", f.path);
		fclose(f.handle);
	}
	return f.path;
}
Example #28
static void logReadyProfile(const std::string& catName, const std::string& profilerName, EBMethodProfile* mp) {
    const char* methodName = method_get_name(mp->mh);
    Class_Handle ch = method_get_class(mp->mh);
    const char* className = class_get_name(ch);
    const char* signature = method_get_descriptor(mp->mh);

    std::ostringstream msg;
    msg <<"EM: profiler["<<profilerName.c_str()<<"] profile is ready [e:"
        << mp->entryCounter <<" b:"<<mp->backedgeCounter<<"] "
        <<className<<"::"<<methodName<<signature;
    INFO2(catName.c_str(), msg.str().c_str());
}
Example #29
/* general send routine per listener.  The deletion_expected tells us whether
 * the last in the queue is about to disappear, so if this client is still
 * referring to it after writing then drop the client as it's fallen too far
 * behind 
 */ 
static void send_to_listener (source_t *source, client_t *client, int deletion_expected)
{
    int bytes;
    int loop = 10;   /* max number of iterations in one go */
    int total_written = 0;

    while (1)
    {
        /* check for limited listener time */
        if (client->con->discon_time)
            if (time(NULL) >= client->con->discon_time)
            {
                INFO1 ("time limit reached for client #%lu", client->con->id);
                client->con->error = 1;
            }

        /* jump out if client connection has died */
        if (client->con->error)
            break;

        /* lets not send too much to one client in one go, but don't
           sleep for too long if more data can be sent */
        if (total_written > 20000 || loop == 0)
        {
            if (client->check_buffer != format_check_file_buffer)
                source->short_delay = 1;
            break;
        }

        loop--;

        if (client->check_buffer (source, client) < 0)
            break;

        bytes = client->write_to_client (client);
        if (bytes <= 0)
            break;  /* can't write any more */

        total_written += bytes;
    }
    source->format->sent_bytes += total_written;

    /* the refbuf referenced at head (last in queue) may be marked for deletion
     * if so, check to see if this client is still referring to it */
    if (deletion_expected && client->refbuf && client->refbuf == source->stream_data)
    {
        INFO2 ("Client %lu (%s) has fallen too far behind, removing",
                client->con->id, client->con->ip);
        stats_event_inc (source->mount, "slow_listeners");
        client->con->error = 1;
    }
}
Example #30
// only when current sweep is set to false
static void gc_partial_con_CMSS(GC *gc) {
  
  INFO2("gc.con.info", "[CMSS] Heap has been exhuasted, current collection = " << gc->num_collections );
  gc_disable_alloc_obj_live(gc); // in the STW manner, so we can disable it at anytime before the mutators are resumed

  /*just debugging*/
    gc_ms_get_current_heap_usage((GC_MS *)gc);
    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
    unsigned int from_marking_end = (unsigned int)(time_now() - con_collection_stat->marking_end_time);
    INFO2("gc.con.info", "[CMSS] marking early time [" << from_marking_end << "] us" );

  gc_con_update_stat_heap_exhausted(gc); // calculate util rate
  
  // start reclaiming heap, it will skip the marking phase
  gc_reset_mutator_context(gc);
  if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
  gc_ms_reclaim_heap((GC_MS*)gc);
  
  // reset after partial stop the world collection
  gc_reset_after_con_collection(gc);
  set_con_nil(gc);
}