Example #1
void
initThreadLabelTable(void)
{
  if (threadLabels == NULL) {
    threadLabels = allocHashTable();
  }
}
Example #2
extern void DEBUG_LoadSymbols( const char *name )
{
    bfd* abfd;
    char **matching;

    bfd_init();
    abfd = bfd_openr(name, "default");
    if (abfd == NULL) {
        barf("can't open executable %s to get symbol table", name);
    }
    if (!bfd_check_format_matches (abfd, bfd_object, &matching)) {
        barf("mismatch");
    }

    {
        long storage_needed;
        asymbol **symbol_table;
        long number_of_symbols;
        long num_real_syms = 0;
        long i;

        storage_needed = bfd_get_symtab_upper_bound (abfd);

        if (storage_needed < 0) {
            barf("can't read symbol table");
        }
        symbol_table = (asymbol **) stgMallocBytes(storage_needed,"DEBUG_LoadSymbols");

        number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);

        if (number_of_symbols < 0) {
            barf("can't canonicalise symbol table");
        }

        if (add_to_fname_table == NULL)
            add_to_fname_table = allocHashTable();

        for( i = 0; i != number_of_symbols; ++i ) {
            symbol_info info;
            bfd_get_symbol_info(abfd,symbol_table[i],&info);
            if (isReal(info.type, info.name)) {
                insertHashTable(add_to_fname_table,
                                info.value, (void*)info.name);
                num_real_syms += 1;
            }
        }

        IF_DEBUG(interpreter,
                 debugBelch("Loaded %ld symbols. Of which %ld are real symbols\n",
                         number_of_symbols, num_real_syms)
                 );

        stgFree(symbol_table);
    }
}
Example #3
void
initThreadLabelTable(void)
{
#if defined(THREADED_RTS)
  initMutex(&threadLabels_mutex);
#endif /* THREADED_RTS */

  if (threadLabels == NULL) {
    threadLabels = allocHashTable();
  }
}
Example #4
AdjustorWritable allocateExec(W_ bytes, AdjustorExecutable *exec_ret)
{
    AdjustorWritable writ;
    ffi_closure* cl;
    if (bytes != sizeof(ffi_closure)) {
        barf("allocateExec: for ffi_closure only");
    }
    ACQUIRE_SM_LOCK;
    cl = writ = ffi_closure_alloc((size_t)bytes, exec_ret);
    if (cl != NULL) {
        if (allocatedExecs == NULL) {
            allocatedExecs = allocHashTable();
        }
        insertHashTable(allocatedExecs, (StgWord)*exec_ret, writ);
    }
    RELEASE_SM_LOCK;
    return writ;
}
Example #5
File: Stable.c  Project: Eufavn/ghc
void
initStablePtrTable(void)
{
    if (SPT_size > 0)
        return;

    SPT_size = INIT_SPT_SIZE;
    stable_ptr_table = stgMallocBytes(SPT_size * sizeof(snEntry),
                                      "initStablePtrTable");

    /* we don't use index 0 in the stable name table, because that
     * would conflict with the hash table lookup operations which
     * return NULL if an entry isn't found in the hash table.
     */
    initFreeList(stable_ptr_table+1,INIT_SPT_SIZE-1,NULL);
    addrToStableHash = allocHashTable();

#ifdef THREADED_RTS
    initMutex(&stable_mutex);
#endif
}
Example #6
File: Stable.c  Project: Eufavn/ghc
void
updateStablePtrTable(rtsBool full)
{
    snEntry *p, *end_stable_ptr_table;
    
    if (full && addrToStableHash != NULL) {
        freeHashTable(addrToStableHash,NULL);
        addrToStableHash = allocHashTable();
    }

    end_stable_ptr_table = &stable_ptr_table[SPT_size];

    // NOTE: _starting_ at index 1; index 0 is unused.
    for (p = stable_ptr_table + 1; p < end_stable_ptr_table; p++) {

        if (p->addr == NULL) {
            if (p->old != NULL) {
                // The target has been garbage collected.  Remove its
                // entry from the hash table.
                removeHashTable(addrToStableHash, (W_)p->old, NULL);
                p->old = NULL;
            }
        }
        else if (p->addr < (P_)stable_ptr_table
                 || p->addr >= (P_)end_stable_ptr_table) {
            // Target still alive; re-hash this stable name.
            if (full) {
                insertHashTable(addrToStableHash, (W_)p->addr,
                                (void *)(p - stable_ptr_table));
            } else if (p->addr != p->old) {
                removeHashTable(addrToStableHash, (W_)p->old, NULL);
                insertHashTable(addrToStableHash, (W_)p->addr,
                                (void *)(p - stable_ptr_table));
            }
        }
    }
}
Example #7
//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
  nat g, n;
  HashTable *addrs;
  StgClosure* p;
  const StgInfoTable *info;
  ObjectCode *oc, *prev, *next;
  gen_workspace *ws;
  StgClosure* link;

  if (unloaded_objects == NULL) return;

  ACQUIRE_LOCK(&linker_unloaded_mutex);

  // Mark every unloadable object as unreferenced initially
  for (oc = unloaded_objects; oc; oc = oc->next) {
      IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                  oc->fileName));
      oc->referenced = rtsFalse;
  }

  addrs = allocHashTable();

  for (p = static_objects; p != END_OF_STATIC_OBJECT_LIST; p = link) {
      p = UNTAG_STATIC_LIST_PTR(p);
      checkAddress(addrs, p);
      info = get_itbl(p);
      link = *STATIC_LINK(info, p);
  }

  // CAFs on revertible_caf_list are not on static_objects
  for (p = (StgClosure*)revertible_caf_list;
       p != END_OF_CAF_LIST;
       p = ((StgIndStatic *)p)->static_link) {
      p = UNTAG_STATIC_LIST_PTR(p);
      checkAddress(addrs, p);
  }

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      searchHeapBlocks (addrs, generations[g].blocks);
      searchHeapBlocks (addrs, generations[g].large_objects);

      for (n = 0; n < n_capabilities; n++) {
          ws = &gc_threads[n]->gens[g];
          searchHeapBlocks(addrs, ws->todo_bd);
          searchHeapBlocks(addrs, ws->part_list);
          searchHeapBlocks(addrs, ws->scavd_list);
      }
  }

#ifdef PROFILING
  /* Traverse the cost centre tree, calling checkAddress on each CCS/CC */
  searchCostCentres(addrs, CCS_MAIN);

  /* Also check each cost centre in the CC_LIST */
  CostCentre *cc;
  for (cc = CC_LIST; cc != NULL; cc = cc->link) {
      checkAddress(addrs, cc);
  }
#endif /* PROFILING */

  // Look through the unloadable objects, and any object that is still
  // marked as unreferenced can be physically unloaded, because we
  // have no references to it.
  prev = NULL;
  for (oc = unloaded_objects; oc; oc = next) {
      next = oc->next;
      if (oc->referenced == 0) {
          if (prev == NULL) {
              unloaded_objects = oc->next;
          } else {
              prev->next = oc->next;
          }
          IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                      oc->fileName));
          freeObjectCode(oc);
      } else {
          IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                      PATH_FMT "\n", oc->fileName));
          prev = oc;
      }
  }

  freeHashTable(addrs, NULL);

  RELEASE_LOCK(&linker_unloaded_mutex);
}
Example #8
//
// Check whether we can unload any object code.  This is called at the
// appropriate point during a GC, where all the heap data is nice and
// packed together and we have a linked list of the static objects.
//
// The check involves a complete heap traversal, but you only pay for
// this (a) when you have called unloadObj(), and (b) at a major GC,
// which is much more expensive than the traversal we're doing here.
//
void checkUnload (StgClosure *static_objects)
{
  nat g, n;
  HashTable *addrs;
  StgClosure* p;
  const StgInfoTable *info;
  ObjectCode *oc, *prev, *next;
  gen_workspace *ws;
  StgClosure* link;

  if (unloaded_objects == NULL) return;

  // Mark every unloadable object as unreferenced initially
  for (oc = unloaded_objects; oc; oc = oc->next) {
      IF_DEBUG(linker, debugBelch("Checking whether to unload %" PATH_FMT "\n",
                                  oc->fileName));
      oc->referenced = rtsFalse;
  }

  addrs = allocHashTable();

  for (p = static_objects; p != END_OF_STATIC_LIST; p = link) {
      checkAddress(addrs, p);
      info = get_itbl(p);
      link = *STATIC_LINK(info, p);
  }

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      searchHeapBlocks (addrs, generations[g].blocks);
      searchHeapBlocks (addrs, generations[g].large_objects);

      for (n = 0; n < n_capabilities; n++) {
          ws = &gc_threads[n]->gens[g];
          searchHeapBlocks(addrs, ws->todo_bd);
          searchHeapBlocks(addrs, ws->part_list);
          searchHeapBlocks(addrs, ws->scavd_list);
      }
  }

  // Look through the unloadable objects, and any object that is still
  // marked as unreferenced can be physically unloaded, because we
  // have no references to it.
  prev = NULL;
  for (oc = unloaded_objects; oc; oc = next) {
      next = oc->next;
      if (oc->referenced == 0) {
          if (prev == NULL) {
              unloaded_objects = oc->next;
          } else {
              prev->next = oc->next;
          }
          IF_DEBUG(linker, debugBelch("Unloading object file %" PATH_FMT "\n",
                                      oc->fileName));
          freeObjectCode(oc);
      } else {
          IF_DEBUG(linker, debugBelch("Object file still in use: %"
                                      PATH_FMT "\n", oc->fileName));
          // Only advance prev past objects we keep, so it never points
          // at an ObjectCode that has just been freed.
          prev = oc;
      }
  }

  freeHashTable(addrs, NULL);
}
Example #9
void
initFileLocking(void)
{
    obj_hash = allocHashTable_(hashLock, cmpLocks);
    fd_hash  = allocHashTable(); /* ordinary word-based table */
}
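
All of the snippets above go through the RTS-internal hash-table interface from rts/Hash.h. As a rough orientation rather than another scraped example, here is a minimal sketch of that usage pattern. It assumes the word-keyed functions visible in the code above (allocHashTable, insertHashTable, removeHashTable, freeHashTable) plus lookupHashTable, and assumes compilation inside the GHC RTS tree; the table and function names here are hypothetical.

#include "Rts.h"
#include "Hash.h"

/* Hypothetical table mapping word-sized keys to payloads, mirroring the
 * threadLabels / addrToStableHash pattern in the examples above. */
static HashTable *exampleTable = NULL;

static void
exampleInsertAndLookup(StgWord key, void *payload)
{
    /* Allocate the table lazily, as initThreadLabelTable() does. */
    if (exampleTable == NULL) {
        exampleTable = allocHashTable();
    }

    /* Word-keyed insert, as in DEBUG_LoadSymbols and allocateExec. */
    insertHashTable(exampleTable, key, payload);

    /* lookupHashTable returns NULL when the key is absent, which is why
     * initStablePtrTable above avoids using index 0. */
    ASSERT(lookupHashTable(exampleTable, key) == payload);

    /* Drop a single entry, as updateStablePtrTable does for dead targets. */
    removeHashTable(exampleTable, key, NULL);
}

static void
exampleFreeTable(void)
{
    /* Passing NULL frees the table without touching the stored values,
     * as in checkUnload's freeHashTable(addrs, NULL). */
    if (exampleTable != NULL) {
        freeHashTable(exampleTable, NULL);
        exampleTable = NULL;
    }
}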