Example #1
static inline STORAGE_HANDLE *get_storage_handle(CTXTdeclc Cell name, prolog_term trie_type)
{
    STORAGE_HANDLE *handle_cell;

    handle_cell = find_or_insert_storage_handle(name);
    /* new buckets are filled out with 0's by the calloc in hashtable_xsb.c */
    if (handle_cell->handle==(Cell)0) {
        /* initialize new handle */
        xsb_dbgmsg((LOG_STORAGE,
                    "GET_STORAGE_HANDLE: New trie created for %s\n",
                    string_val(name)));
        if (is_int(trie_type))
            handle_cell->handle= newtrie(CTXTc (int)p2c_int(trie_type));
        else
            xsb_abort("[GET_STORAGE_HANDLE] trie type (3d arg) must be an integer");

        /* Note: not necessary to initialize snapshot_number&changed: handle_cell
           was calloc()'ed
           handle_cell->snapshot_number=0;
           handle_cell->changed=FALSE;
        */
    }
    else
        xsb_dbgmsg((LOG_STORAGE,
                    "GET_STORAGE_HANDLE: Using existing trie for %s\n",
                    string_val(name)));
    return handle_cell;
}
Example #2
void do_delay_stuff(NODEptr as_leaf, VariantSF subgoal, xsbBool sf_exists)
{
    ASI	asi;
    DL dl = NULL;

#ifdef DEBUG_DELAYVAR
    xsb_dbgmsg((LOG_DEBUG, ">>>> Start do_delay_stuff ...\n"));
    xsb_dbgmsg((LOG_DEBUG, ">>>> The delay list for this subgoal itself is:\n"));
    xsb_dbgmsg((LOG_DEBUG, ">>>> ")); 
    dbg_print_delay_list(LOG_DEBUG,stddbg, delayreg);
    xsb_dbgmsg((LOG_DEBUG, "\n"));
#endif

    if (delayreg && (!sf_exists || is_conditional_answer(as_leaf))) {
      if ((dl = intern_delay_list(delayreg)) != NULL) {
	mark_conditional_answer(as_leaf, subgoal, dl);
	record_de_usage(dl);
      }
    }
    /*
     * Check for the derivation of an unconditional answer.
     */
    if (sf_exists && is_conditional_answer(as_leaf) &&
	(!delayreg || !dl)) {
      /*
       * Initiate positive simplification in places where this answer
       * substitution has already been returned.
       */
      simplify_pos_unconditional(as_leaf);
    }
    if (is_unconditional_answer(as_leaf) && subg_nde_list(subgoal)) {
      simplify_neg_succeeds(subgoal);
    }
}
Example #3
void simplify_neg_fails(VariantSF subgoal)
{
  PNDE nde;
  DE de;
  DL dl;

#ifdef DEBUG_DELAYVAR
  xsb_dbgmsg((LOG_DEBUG, ">>>> start simplify_neg_fails()\n"));
  xsb_dbgmsg((LOG_DEBUG, ">>>> the subgoal is: "));
  dbg_print_subgoal(LOG_DEBUG,stddbg, subgoal); 
  xsb_dbgmsg((LOG_DEBUG, "\n"));
#endif

  while ((nde = subg_nde_list(subgoal))) {
    de = pnde_de(nde);
    dl = pnde_dl(nde);
    remove_pnde(subg_nde_list(subgoal), nde);
    if (!remove_de_from_dl(de, dl))
      handle_empty_dl_creation(dl);
  }

#ifdef DEBUG_DELAYVAR
  fprintf(stddbg, ">>>> end simplify_neg_fails()\n");
#endif
  
}
Example #4
xsbBucket *search_bucket(Cell name,
			 xsbHashTable *table,
			 enum xsbHashSearchOp search_op)
{
  xsbBucket *bucket, *prev;

  if (! table->initted) init_hashtable(table);

  prev = NULL;
  bucket = get_top_bucket(table,table_hash(name,table->length));
  while (bucket && bucket->name) {
    if (bucket->name == name) {
      if (search_op == hashtable_delete) {
	if (!prev) {
	  /* if deleting a top bucket, copy the next bucket into the top one
	     and delete that next bucket. If no next, then just nullify name */
	  prev = bucket;
	  bucket=bucket->next;
	  if (bucket) {
	    /* use memcpy() because client bucket might have extra fields */
	    memcpy(prev, bucket, table->bucket_size);
	    free(bucket);
	  } else {
	    mark_bucket_free(prev,table->bucket_size);
	    xsb_dbgmsg((LOG_HASHTABLE,
		       "SEARCH_BUCKET: Destroying storage handle for %s\n",
		       string_val(name)));
	    xsb_dbgmsg((LOG_HASHTABLE, 
		       "SEARCH_BUCKET: Bucket nameptr is %p, next bucket %p\n",
		       prev->name, prev->next));
	  }
	} else {
	  /* Not top bucket: rearrange pointers & free space */
	  prev->next = bucket->next;
	  free(bucket);
	}
	return NULL;
      } else
	return bucket;
    }
    prev = bucket;
    bucket = bucket->next;
  }
  /* not found */
  if (search_op != hashtable_insert) return NULL;
  /* else create new bucket */
  /* calloc nullifies the allocated space; CLIENTS RELY ON THIS */
  if (!bucket) { /* i.e., it is not a top bucket */
    bucket = (xsbBucket *)calloc(1,table->bucket_size);
    if (!bucket)
      xsb_exit("Out of Memory: Can't allocate hash bucket");
    prev->next = bucket;
    /* NOTE: not necessary to nullify bucket->next because of calloc() */
  }
  bucket->name = name;
  return bucket;
}
Example #5
STORAGE_HANDLE *storage_builtin(CTXTdeclc int builtin_number, Cell name, prolog_term trie_type)
{
    switch (builtin_number) {
    case GET_STORAGE_HANDLE:
        return get_storage_handle(CTXTc name, trie_type);
    case INCREMENT_STORAGE_SNAPSHOT:
        return increment_storage_snapshot(CTXTc name);
    case MARK_STORAGE_CHANGED:
        return mark_storage_changed(CTXTc name);
    case DESTROY_STORAGE_HANDLE: {
        xsb_dbgmsg((LOG_STORAGE,
                    "STORAGE_BUILTIN: Destroying storage handle for %s\n",
                    string_val(name)));
        destroy_storage_handle(name);
        return NULL;
    }
    case SHOW_TABLE_STATE: {
        show_table_state();
        return NULL;
    }
    default:
        xsb_abort("Unknown storage builtin");
        return NULL;
    }
}
Example #6
/* context here is of type USERDATA */
PRIVATE void rdf_new_triple_handler (HTRDF *rdfp, HTTriple *t, void *context)
{
  USERDATA *userdata = (USERDATA *)context;

#ifdef LIBWWW_DEBUG
  xsb_dbgmsg((LOG_DEBUG,"***In rdf_new_triple_handler(%s)", RequestID(userdata->request)));
#endif
  /* create a new triple */
  if (rdfp && t) {
    prolog_term ptriple = extern_p2p_car(userdata->parsed_term_tail);

    extern_c2p_functor("rdftriple",3,ptriple);
    if (HTTriple_predicate(t))
      extern_c2p_string(HTTriple_predicate(t), extern_p2p_arg(ptriple,1));
    else
      extern_c2p_string("rdfunknown", extern_p2p_arg(ptriple,1));
    if (HTTriple_subject(t))
      extern_c2p_string(HTTriple_subject(t), extern_p2p_arg(ptriple,2));
    else
      extern_c2p_string("rdfunknown", extern_p2p_arg(ptriple,2));
    if (HTTriple_object(t))
      extern_c2p_string(HTTriple_object(t), extern_p2p_arg(ptriple,3));
    else
      extern_c2p_string("rdfunknown", extern_p2p_arg(ptriple,3));

#ifdef LIBWWW_DEBUG_VERBOSE
    print_prolog_term(userdata->parsed_term_tail, "Current result tail");
#endif

    userdata->parsed_term_tail = extern_p2p_cdr(userdata->parsed_term_tail);
    extern_c2p_list(userdata->parsed_term_tail);
  }
}
Example #7
void dsExpand(DynamicStack *ds, int num_frames) {

  size_t new_size, total_bytes;
  char *new_base;

  if ( num_frames < 1 )
    return;
  if ( DynStk_CurSize(*ds) > 0 )
    new_size = 2 * DynStk_CurSize(*ds);
  else
    new_size = DynStk_InitSize(*ds);
  if ( new_size < DynStk_CurSize(*ds) + num_frames )
    new_size = new_size + num_frames;

  xsb_dbgmsg((LOG_TRIE_STACK, "Expanding %s: %d -> %d", DynStk_Name(*ds),
	     DynStk_CurSize(*ds), new_size));
  dbg_dsPrint(LOG_TRIE_STACK, *ds, "Before expansion");

  total_bytes = new_size * DynStk_FrameSize(*ds);
  new_base = realloc(DynStk_Base(*ds),total_bytes);
  if ( IsNULL(new_base) )
    xsb_abort("Ran out of memory during expansion of %s", DynStk_Name(*ds));
  DynStk_Top(*ds) =
    new_base + ((char *)DynStk_Top(*ds) - (char *)DynStk_Base(*ds));
  DynStk_Base(*ds) = new_base;
  DynStk_Ceiling(*ds) = new_base + total_bytes;
  DynStk_CurSize(*ds) = new_size;

  dbg_dsPrint(LOG_TRIE_STACK, *ds, "After expansion");
}
Example #8
void dsPrint(DynamicStack ds, char *comment) {

  xsb_dbgmsg((LOG_DEBUG, "Dynamic Stack: %s (%s)\n"
	     "  Stack Base:    %8p\tFrame Size:   %u bytes\n"
	     "  Stack Top:     %8p\tCurrent Size: %u frames\n"
	     "  Stack Ceiling: %8p\tInitial Size: %u frames",
	     DynStk_Name(ds), comment,
	     DynStk_Base(ds), DynStk_FrameSize(ds),
	     DynStk_Top(ds), DynStk_CurSize(ds),
	     DynStk_Ceiling(ds), DynStk_InitSize(ds)));
}
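A note on the xsb_dbgmsg((...)) idiom used throughout these examples: the double parentheses let a pre-C99 macro forward an entire argument list through a single macro parameter, so the call can compile away to nothing when debug output is disabled. The sketch below shows one way such a macro could be defined; dbg_printf and its filtering logic are illustrative assumptions, not XSB's actual header (only the cur_log_level name also appears in the examples above).

#include <stdarg.h>
#include <stdio.h>

static int cur_log_level = 1;              /* assumed runtime verbosity threshold */

/* hypothetical sink: print only messages whose log level is enabled */
static void dbg_printf(int log_level, const char *fmt, ...)
{
  va_list ap;
  if (log_level > cur_log_level) return;   /* message filtered out */
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);
  va_end(ap);
}

#ifdef DEBUG_VERBOSE
#define xsb_dbgmsg(args) dbg_printf args   /* xsb_dbgmsg((lvl, fmt, ...)) -> dbg_printf(lvl, fmt, ...) */
#else
#define xsb_dbgmsg(args)                   /* expands to nothing in non-debug builds */
#endif

int main(void)
{
  xsb_dbgmsg((1, "Expanding %s: %d -> %d\n", "demo_stack", 16, 32));
  return 0;
}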
Example #9
static void handle_empty_dl_creation(DL dl)
{
  NODEptr as_leaf = dl_asl(dl);
  ASI asi = Delay(as_leaf);
  VariantSF subgoal;

#ifdef DEBUG_DELAYVAR
  fprintf(stddbg, ">>>> start handle_empty_dl_creation()\n");
#endif
  /*
   * Only when `as_leaf' is still a conditional answer can we do
   * remove_dl_from_dl_list(), simplify_pos_unconditional(), and
   * simplify_neg_succeeds() here.
   *
   * If `as_leaf' is already marked UNCONDITIONAL (by
   * unmark_conditional_answer(as_leaf) in simplify_pos_unconditional()),
   * that means this is the second time `as_leaf' has become
   * unconditional, so we don't need to do anything: all the DLs were
   * already released the first time.
   */
  if (is_conditional_answer(as_leaf)) {	/* if it is still conditional */
    remove_dl_from_dl_list(dl, asi);
    subgoal = asi_subgoal(Delay(as_leaf));
#ifdef DEBUG_DELAYVAR
    xsb_dbgmsg((LOG_DEBUG, ">>>> the subgoal is:"));
    dbg_print_subgoal(LOG_DEBUG,stddbg, subgoal); 
    xsb_dbgmsg((LOG_DEBUG, "\n"));
#endif
    /*
     * simplify_pos_unconditional(as_leaf) will release all other DLs for
     * as_leaf, and mark as_leaf as UNCONDITIONAL.
     */
    simplify_pos_unconditional(as_leaf);
    /*-- perform early completion if necessary; please preserve invariants --*/
    if (!is_completed(subgoal) && most_general_answer(as_leaf)) {
      perform_early_completion(subgoal, subg_cp_ptr(subgoal));
      subg_compl_susp_ptr(subgoal) = NULL;
    }
    simplify_neg_succeeds(subgoal);
  }
}
Example #10
PRIVATE void rdf_delete_userData(void *userdata)
{
  prolog_term parsed_result, status_term;
  USERDATA *me = (USERDATA *)userdata;
  HTRequest *request = me->request;

  if (request) {
    parsed_result =
      ((REQUEST_CONTEXT *)HTRequest_context(request))->request_result;
    status_term =
      ((REQUEST_CONTEXT *)HTRequest_context(request))->status_term;
  }
  else return;

#ifdef LIBWWW_DEBUG
  xsb_dbgmsg((LOG_DEBUG,"***In rdf_delete_userData(%s)", RequestID(request)));
#endif

#ifdef LIBWWW_DEBUG_VERBOSE
  print_prolog_term(me->parsed_term, "Current parse value");
#endif

  /* terminate the parsed prolog terms list */
  extern_c2p_nil(me->parsed_term_tail);

  /* pass the result to the outside world */
  if (is_var(me->parsed_term))
    extern_p2p_unify(parsed_result, me->parsed_term);
  else
    xsb_abort("[LIBWWW_REQUEST] Request %s: Arg 4 (Result) must be unbound variable",
	      RequestID(request));

  HT_FREE(me);

#ifdef LIBWWW_DEBUG
  xsb_dbgmsg((LOG_DEBUG,"***Request %s: freed the USERDATA object", RequestID(request)));
#endif

  return;
}
Example #11
PRIVATE USERDATA *rdf_create_userData(HTRDF     *parser,
				      HTRequest *request,
				      HTStream  *target_stream)
{
  USERDATA *me = NULL;
#ifdef LIBWWW_DEBUG
  xsb_dbgmsg((LOG_DEBUG,"***Start rdf_create_userData: Request %s", RequestID(request)));
#endif
  if (parser) {
    /* make sure that MIME type is appropriate for RDF */
    if (!verifyMIMEformat(request, RDFPARSE)) {
      /*
	HTStream * input = HTRequest_inputStream(request);
	(*input->isa->abort)(input, NULL);
	HTRequest_setInputStream(request,NULL);
	HTRequest_kill(request);
	return NULL;
      */
      xsb_abort("[LIBWWW_REQUEST] Bug: Request type/MIME type mismatch");
    }
    if ((me = (USERDATA *) HT_CALLOC(1, sizeof(USERDATA))) == NULL)
      HT_OUTOFMEM("libwww_parse_rdf");
    me->delete_method = rdf_delete_userData;
    me->parser = parser;
    me->request = request;
    me->target = target_stream;
    me->parsed_term = extern_p2p_new();
    extern_c2p_list(me->parsed_term);
    me->parsed_term_tail = me->parsed_term;
  }
  
#ifdef LIBWWW_DEBUG
  xsb_dbgmsg((LOG_DEBUG,"***End rdf_create_userData: Request %s", RequestID(request)));
#endif

  /* Hook up userdata to the request context */
  ((REQUEST_CONTEXT *)HTRequest_context(request))->userdata = (void *)me;

  return me;
}
Example #12
NodeStats subgoal_statistics(CTXTdeclc Structure_Manager *sm) {

  NodeStats sg_stats;
  TIFptr tif;
  int nSubgoals;
  VariantSF pProdSF;
  SubConsSF pConsSF;

  sg_stats = node_statistics(sm);
  nSubgoals = 0;
  SYS_MUTEX_LOCK( MUTEX_TABLE );				
  if ( sm == &smVarSF ) {
    for ( tif = tif_list.first;  IsNonNULL(tif);  tif = TIF_NextTIF(tif) )
      if ( IsVariantPredicate(tif) )
	for ( pProdSF = TIF_Subgoals(tif);  IsNonNULL(pProdSF);
	      pProdSF = (VariantSF)subg_next_subgoal(pProdSF) )
	  nSubgoals++;
  }
  /* No shared smProdSF or smConsSF in MT engine */
  else if ( sm == &smProdSF ) {
    for ( tif = tif_list.first;  IsNonNULL(tif);  tif = TIF_NextTIF(tif) )
      if ( IsSubsumptivePredicate(tif) )
	for ( pProdSF = TIF_Subgoals(tif);  IsNonNULL(pProdSF);
	      pProdSF = (VariantSF)subg_next_subgoal(pProdSF) )
	  nSubgoals++;
  }
  else if ( sm == &smConsSF ) {
    for ( tif = tif_list.first;  IsNonNULL(tif);  tif = TIF_NextTIF(tif) )
      if ( IsSubsumptivePredicate(tif) )
	for ( pProdSF = TIF_Subgoals(tif);  IsNonNULL(pProdSF);
	      pProdSF = (VariantSF)subg_next_subgoal(pProdSF) )
	  for ( pConsSF = subg_consumers(pProdSF);  IsNonNULL(pConsSF); 
		pConsSF = conssf_consumers(pConsSF) )
	    nSubgoals++;
  }
  else {
    SYS_MUTEX_UNLOCK( MUTEX_TABLE );				
    xsb_dbgmsg((LOG_DEBUG, "Incorrect use of subgoal_statistics()\n"
	       "SM does not contain subgoal frames"));
    return sg_stats;
  }

  SYS_MUTEX_UNLOCK( MUTEX_TABLE );				
  if ( NodeStats_NumUsedNodes(sg_stats) != (counter) nSubgoals )
    xsb_warn(CTXTc "Inconsistent Subgoal Frame Usage Calculations:\n"
	     "\tSubgoal Frame count mismatch");

  return sg_stats;
}
Example #13
void show_table_state(xsbHashTable *table)
{
  xsbBucket *bucket;
  int i;

  xsb_dbgmsg((LOG_DEBUG,"\nCell Status\tOverflow Count\n"));
  for (i=0; i < table->length; i++) {
    bucket = get_top_bucket(table,i);
    if (is_free_bucket(bucket)) {
      /* free cell */
      xsb_dbgmsg((LOG_DEBUG, "   ---\t\t   ---"));
    } else {
      int overflow_count=0;

      fprintf(stddbg, "  taken\t\t");
      bucket = bucket->next;
      while (bucket != NULL) {
	overflow_count++;
	bucket = bucket->next;
      }
      xsb_dbgmsg((LOG_DEBUG,"   %d", overflow_count));
    }
  }
}
Example #14
void dsInit(DynamicStack *ds, size_t stack_size, size_t frame_size,
	    char *name) {

  size_t total_bytes;

  xsb_dbgmsg((LOG_TRIE_STACK, "Initializing %s", name));

  total_bytes = stack_size * frame_size;
  DynStk_Base(*ds) = malloc(total_bytes);
  if ( IsNULL(DynStk_Base(*ds)) )
    xsb_abort("Ran out of memory in allocation of %s", DynStk_Name(*ds));
  DynStk_Top(*ds) = DynStk_Base(*ds);
  DynStk_Ceiling(*ds) = (char *)DynStk_Base(*ds) + total_bytes;
  DynStk_FrameSize(*ds) = frame_size;
  DynStk_InitSize(*ds) = DynStk_CurSize(*ds) = stack_size;
  DynStk_Name(*ds) = name;
}
Example #15
File: subp.c  Project: flavioc/XSB
/* Our separate thread */
void checkJavaInterrupt(void *info)
{
  char ch;
  SOCKET intSocket = (SOCKET)info;
  xsb_dbgmsg((LOG_DEBUG, "Thread started on socket %ld",(int)intSocket));
  while(1){
    if (1!=recv(intSocket,&ch,1,0)) {
      xsb_warn("Problem handling interrupt from Java");
    }
    else 
      xsb_mesg("--- Java interrupt detected");
    /* Avoid those annoying lags? */
    fflush(stdout);
    fflush(stderr);
    fflush(stdmsg);
    fflush(stdwarn);
    fflush(stddbg);
    keyint_proc(SIGINT); /* Do XSB's "interrupt" thing */
  }
}
Example #16
void dsShrink(DynamicStack *ds) {

  size_t total_bytes;
  char *new_base;

  if ( DynStk_CurSize(*ds) <= DynStk_InitSize(*ds) )
    return;
  total_bytes = DynStk_InitSize(*ds) * DynStk_FrameSize(*ds);
  new_base = realloc(DynStk_Base(*ds),total_bytes);

  xsb_dbgmsg((LOG_TRIE_STACK, "Shrinking %s: %d -> %d", DynStk_Name(*ds),
	     DynStk_CurSize(*ds), DynStk_InitSize(*ds)));

  if ( IsNULL(new_base) )
    xsb_abort("Ran out of memory during expansion of %s", DynStk_Name(*ds));
  DynStk_Top(*ds) =
    new_base + ((char *)DynStk_Top(*ds) - (char *)DynStk_Base(*ds));
  DynStk_Base(*ds) = new_base;
  DynStk_Ceiling(*ds) = new_base + total_bytes;
  DynStk_CurSize(*ds) = DynStk_InitSize(*ds);
}
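The dsInit, dsExpand, dsPrint, and dsShrink examples above manipulate the stack only through DynStk_* accessors. A structure and accessor set consistent with that usage might look like the sketch below; the field names and the plain-macro accessors are assumptions inferred from the call sites, not XSB's actual declarations.

#include <stddef.h>

typedef struct {
  void   *base;        /* DynStk_Base:      start of the frame array           */
  void   *top;         /* DynStk_Top:       next free frame                    */
  void   *ceiling;     /* DynStk_Ceiling:   one past the end of the allocation */
  size_t  frame_size;  /* DynStk_FrameSize: bytes per frame                    */
  size_t  cur_size;    /* DynStk_CurSize:   current capacity, in frames        */
  size_t  init_size;   /* DynStk_InitSize:  capacity that dsShrink restores    */
  char   *name;        /* DynStk_Name:      label used in the debug messages   */
} DynamicStack;

#define DynStk_Base(ds)       ((ds).base)
#define DynStk_Top(ds)        ((ds).top)
#define DynStk_Ceiling(ds)    ((ds).ceiling)
#define DynStk_FrameSize(ds)  ((ds).frame_size)
#define DynStk_CurSize(ds)    ((ds).cur_size)
#define DynStk_InitSize(ds)   ((ds).init_size)
#define DynStk_Name(ds)       ((ds).name)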
Example #17
static DE intern_delay_element(Cell delay_elem)
{
  DE de;
  CPtr cptr = (CPtr) cs_val(delay_elem);
  /*
   * All the following information about delay_elem is set in
   * delay_negatively() or delay_positively().  Note that cell(cptr) is
   * the delay_psc ('DL').
   */
  VariantSF subgoal;
  NODEptr ans_subst;
  CPtr ret_n = 0;
  int arity;
  Cell tmp_cell;

  tmp_cell = cell(cptr + 1);
  subgoal = (VariantSF) addr_val(tmp_cell);
  tmp_cell = cell(cptr + 2);
  ans_subst = (NODEptr) addr_val(tmp_cell);
  tmp_cell = cell(cptr + 3);
  
  /*
   * cell(cptr + 3) can be one of the following:
   *   1. integer 0 (NEG_DELAY), for a negative DE;
   *   2. string "ret", for a positive DE with arity 0;
   *   3. constr ret/n, for a positive DE with arity >=1.
   */
  if (isinteger(tmp_cell) || isstring(tmp_cell))
    arity = 0;
  else {
    ret_n = (CPtr) cs_val(tmp_cell);
    arity = get_arity((Psc) get_str_psc(cell(cptr + 3)));
  }

#ifdef DEBUG_DELAYVAR
  xsb_dbgmsg((LOG_DEBUG,">>>> "));
  dbg_print_delay_list(LOG_DEBUG,stddbg, delayreg);
  xsb_dbgmsg((LOG_DEBUG, "\n"));
  xsb_dbgmsg((LOG_DEBUG, ">>>> (Intern ONE de) arity of answer subsf = %d\n", 
	     arity));
#endif

  if (!was_simplifiable(subgoal, ans_subst)) {
    new_entry(de,
	      released_des,
	      next_free_de,
	      current_de_block,
	      current_de_block_top,
	      de_next,
	      DE,
	      de_block_size,
	      "Not enough memory to expand DE space");
    de_subgoal(de) = subgoal;
    de_ans_subst(de) = ans_subst; /* Leaf of the answer (substitution) trie */

#ifdef DEBUG_DELAYVAR
    de_subs_fact(de) = NULL;
#ifndef IGNORE_DELAYVAR
    if (arity != 0) {
      de_subs_fact_leaf(de) = delay_chk_insert(arity, ret_n + 1,
					       (CPtr *) &de_subs_fact(de));
    }
#endif /* IGNORE_DELAYVAR */
#else
#ifndef IGNORE_DELAYVAR
    if (arity != 0) {
      CPtr hook = NULL;
      de_subs_fact_leaf(de) = delay_chk_insert(arity, ret_n + 1,
					       &hook);
    }
#endif /* IGNORE_DELAYVAR */
#endif
    return de;
  }
  else
    return NULL;
}
Example #18
int gc_heap(CTXTdeclc int arity, int ifStringGC)
{
#ifdef GC
  CPtr p;
  double  begin_marktime, end_marktime,
    end_slidetime, end_copy_time,
    begin_stringtime, end_stringtime;
  size_t  marked = 0, marked_dregs = 0, i;
  int ii;
  size_t  start_heap_size;
  size_t  rnum_in_trieinstr_unif_stk = (trieinstr_unif_stkptr-trieinstr_unif_stk)+1;
  DECL_GC_PROFILE;
  garbage_collecting = 1;  // flag for profiling that we are gc-ing

  //  printf("start gc(%ld): e:%p,h:%p,hf:%p\n",(long)(cpu_time()*1000),ereg,hreg,hfreg);
  INIT_GC_PROFILE;
  if (pflags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (pflags[GARBAGE_COLLECT] == SLIDING_GC) | 
      (pflags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    
    if (fragmentation_only) 
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    
    GC_PROFILE_START_SUMMARY;
    
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;
    
    /* make sure the top choice point heap pointer 
       that might not point into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;
      hreg++;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
	arity++;
	reg[arity] = (Cell)delayreg;
      }
      for (ii = 1; ii <= arity; ii++) {
	//	printf("reg[%d] to heap: %lx\n",ii,(size_t)reg[i]);
	*hreg = reg[ii];
	hreg++;
      }
      arity += (int)rnum_in_trieinstr_unif_stk;
      for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
	//	printf("trieinstr_unif_stk[%d] to heap: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
	*hreg = trieinstr_unif_stk[i];
	hreg++;
      }
      //      printf("extended heap: hreg=%p, arity=%d, rnum_in=%d\n",hreg,arity, rnum_in_trieinstr_unif_stk);
#ifdef SLG_GC
      /* in SLGWAM, copy hfreg to the heap */
      //      printf("hfreg to heap is %p at %p, rnum_in_trieinstr_unif_stk=%d,arity=%d,delay=%p\n",hfreg,hreg,rnum_in_trieinstr_unif_stk,arity,delayreg);
      *(hreg++) = (Cell) hfreg;
#endif
    }

    if (top_of_localstk < hreg) {
      fprintf(stderr,"stack clobbered: no space for gc_heap\n"); 
      xsb_exit( "stack clobbered");
    }

    gc_strings = ifStringGC; /* default */
    gc_strings = should_gc_strings();  // collect strings for any reason?
    marked = mark_heap(CTXTc arity, &marked_dregs);
    
    end_marktime = cpu_time();
    
    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
	 this is internal fragmentation only.  we print marked and total,
	 so that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC, "marked_used_missed(%d,%d,%d,%d).",
		 marked,hreg+1-(CPtr)glstack.low,
		 heap_early_reset,ls_early_reset));

    free_marks:

#ifdef PRE_IMAGE_TRAIL
      /* re-tag pre image cells in trail */
      for (p = tr_bot; p <= tr_top ; p++ ) {
	if (tr_pre_marked(p-tr_bot)) {
	  *p = *p | PRE_IMAGE_MARK;
	  tr_clear_pre_mark(p-tr_bot);
	}
      }
#endif

      /* get rid of the marking areas - if they exist */
      if (heap_marks)  { mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE); heap_marks = NULL; }
      if (tr_marks)    { mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE); tr_marks = NULL; }
      if (ls_marks)    { mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE); ls_marks = NULL; }
      if (cp_marks)    { mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE); cp_marks = NULL; }
      if (slide_buf)   { mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE); slide_buf = NULL; }
      goto end;
    }
    
    GC_PROFILE_MARK_SUMMARY;
    
    /* An attempt to add some gc/expansion policy;
       ideally this should be user-controlled */
#if (! defined(GC_TEST))
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold))
      {
	GC_PROFILE_QUIT_MSG;
        if (slide)
          hreg -= arity;
	total_time_gc += (double) 
	  (end_marktime-begin_marktime);
        goto free_marks; /* clean-up temp areas and get out of here... */
      }
#endif
    
    total_collected += (start_heap_size - marked);

    if (slide)
      {
	GC_PROFILE_SLIDE_START_TIME;
	hreg = slide_heap(CTXTc marked) ;

#ifdef DEBUG_VERBOSE
	if (hreg != (heap_bot+marked))
	  xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#endif

#ifdef SLG_GC
	/* copy hfreg back from the heap */
	hreg--;
	hfreg = (CPtr) *hreg;
#endif

	/* copy the aregs from the top of the heap back */
	hreg -= arity;
	hbreg = cp_hreg(breg);
	
	p = hreg;
	
	arity -= (int)rnum_in_trieinstr_unif_stk;
	for (ii = 1; ii <= arity; ii++) {
	  reg[ii] = *p++;
	  //	  printf("heap to reg[%d]: %lx\n",ii,(size_t)reg[i]);
	}
	if (delayreg != NULL)
	  delayreg = (CPtr)reg[arity--];
	for (i = 0; i < rnum_in_trieinstr_unif_stk; i++) {
	  trieinstr_unif_stk[i] = *p++;
	  //	  printf("heap to trieinstr_unif_stk[%d]: %lx\n",i,(size_t)trieinstr_unif_stk[i]);
	}

	end_slidetime = cpu_time();
	
	total_time_gc += (double) 
	  (end_slidetime - begin_marktime);
	
	GC_PROFILE_SLIDE_FINAL_SUMMARY;
      }
    else
      { /* else we call the copying collector a la Cheney */
	CPtr begin_new_heap, end_new_heap;
	
	GC_PROFILE_COPY_START_TIME;
	
	begin_new_heap = (CPtr)mem_alloc(marked*sizeof(Cell),GC_SPACE);
	if (begin_new_heap == NULL)
	  xsb_exit( "copying garbage collection could not allocate new heap");
	end_new_heap = begin_new_heap+marked;

	hreg = copy_heap(CTXTc marked,begin_new_heap,end_new_heap,arity);

	mem_dealloc(begin_new_heap,marked*sizeof(Cell),GC_SPACE);
	adapt_hfreg_from_choicepoints(CTXTc hreg);
	hbreg = cp_hreg(breg);

#ifdef SLG_GC
	hfreg = hreg;
#endif
	end_copy_time = cpu_time();
	
	total_time_gc += (double) 
	  (end_copy_time - begin_marktime);
	
	GC_PROFILE_COPY_FINAL_SUMMARY;
      }
    
    if (print_on_gc) print_all_stacks(CTXTc arity);
    
    /* get rid of the marking areas - if they exist */
    if (heap_marks)  { 
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      mem_dealloc((heap_marks-1),heap_marks_size,GC_SPACE) ; /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks)    { 
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      mem_dealloc(tr_marks,tr_top-tr_bot+1,GC_SPACE) ;
      tr_marks = NULL ;
    }
    if (ls_marks)    { 
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      mem_dealloc(ls_marks,ls_bot - ls_top + 1,GC_SPACE) ;
      ls_marks = NULL ;
    }
    if (cp_marks)    {  
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      mem_dealloc(cp_marks,cp_bot - cp_top + 1,GC_SPACE) ;
      cp_marks = NULL ;
    }
    if (slide_buf)   { 
      mem_dealloc(slide_buf,(slide_buf_size+1)*sizeof(CPtr),GC_SPACE); 
      slide_buf = NULL; 
    }
#ifdef SAFE_GC
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
    
  } /* if (pflags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif
  
#ifdef GC
 end:
  
  /*************** GC STRING-TABLE (already marked from heap) *******************/
#ifndef NO_STRING_GC
#ifdef MULTI_THREAD
  if (flags[NUM_THREADS] == 1) {
#endif
    if (gc_strings && (flags[STRING_GARBAGE_COLLECT] == 1)) {
      num_sgc++;
      begin_stringtime = cpu_time();
      mark_nonheap_strings(CTXT);
      free_unused_strings();
      //      printf("String GC reclaimed: %d bytes\n",beg_string_space_size - pspacesize[STRING_SPACE]);
      gc_strings = FALSE;
      end_stringtime = cpu_time();
      total_time_gc += end_stringtime - begin_stringtime;
    }
    /* update these even if no GC, to avoid too many calls just to gc strings */
    last_string_space_size = pspacesize[STRING_SPACE];
    last_assert_space_size = pspacesize[ASSERT_SPACE];
    force_string_gc = FALSE;
#ifdef MULTI_THREAD
  }
#endif
#endif /* ndef NO_STRING_GC */

  GC_PROFILE_POST_REPORT;
  garbage_collecting = 0;
  
#endif /* GC */
  //  printf("   end gc(%ld), hf:%p,h:%p, space=%d\n",(long)(cpu_time()*1000),hfreg,hreg,(pb)top_of_localstk - (pb)top_of_heap);

  return(TRUE);

} /* gc_heap */
Example #19
xsbBool glstack_realloc(CTXTdeclc size_t new_size, int arity)
{
  CPtr   new_heap_bot=NULL ;       /* bottom of new Global Stack area */
  CPtr   new_ls_bot ;         /* bottom of new Local Stack area */

  size_t heap_offset ;        /* offsets between the old and new */
  size_t local_offset ;       /* stack bottoms, measured in Cells */

  CPtr   *cell_ptr ;
  Cell   cell_val ;
  size_t i, rnum_in_trieinstr_unif_stk = (trieinstr_unif_stkptr-trieinstr_unif_stk)+1;

  size_t  new_size_in_bytes, new_size_in_cells ; /* what a mess ! */
  double   expandtime ;

  if (new_size <= glstack.size) { // asked to shrink
    // new_size is space needed + half of init_size, rounded to K
    new_size = (((glstack.high - (byte *)top_of_localstk) +
		 ((byte *)hreg - glstack.low)) + glstack.init_size*K/2 + (K-1)) / K;
    // but not smaller than init_size
    if (new_size < glstack.init_size) new_size = glstack.init_size;
    if (new_size >= glstack.size) return 0;  // computed new_size won't shrink
    //    printf("shrinking glstack from %dK to %dK\n",glstack.size,new_size);
  }

  //  fprintf(stddbg,"Reallocating the Heap and Local Stack data area");
#ifdef DEBUG_VERBOSE
  if (LOG_REALLOC <= cur_log_level) {
    if (glstack.size == glstack.init_size) {
      xsb_dbgmsg((LOG_REALLOC,"\tBottom:\t\t%p\t\tInitial Size: %" Intfmt "K",
		 glstack.low, glstack.size));
      xsb_dbgmsg((LOG_REALLOC,"\tTop:\t\t%p", glstack.high));
    }
  }
#endif
  expandtime = cpu_time();

  new_size_in_bytes = new_size*K ;
  new_size_in_cells = new_size_in_bytes/sizeof(Cell) ;
  		/* and let's hope K stays divisible by sizeof(Cell) */

  stack_boundaries ;

  /* Expand the data area and push the Local Stack to the high end. */

  if (new_size < glstack.size) { //shrinking
    // move local stack down
    memmove(glstack.low + new_size_in_bytes-(glstack.high-(byte *)ls_top),  // to
	    ls_top,  // from
	    glstack.high - (byte *)ls_top  // size
	    );
    new_heap_bot = (CPtr)realloc(heap_bot, new_size_in_bytes);
    heap_offset = new_heap_bot - heap_bot ;
    new_ls_bot = new_heap_bot + new_size_in_cells - 1 ;
    local_offset = new_ls_bot - ls_bot ;
  } else { // expanding
    if (!USER_MEMORY_LIMIT_EXHAUSTED(new_size)) 
      new_heap_bot = (CPtr)realloc(heap_bot, new_size_in_bytes);
    if (new_heap_bot == NULL) {
      if (2*glstack.size == new_size) { /* if trying to double, try backing off, may not help */
	size_t increment = new_size;
	while (new_heap_bot == NULL && increment > 40) {
	  increment = increment/2;
	  new_size = glstack.size + increment;
	  new_size_in_bytes = new_size*K ;
	  new_size_in_cells = new_size_in_bytes/sizeof(Cell) ;
	  if (!USER_MEMORY_LIMIT_EXHAUSTED(new_size))
	    new_heap_bot = (CPtr)realloc(heap_bot, new_size_in_bytes);
	}
	if (new_heap_bot == NULL) {
	  //	  xsb_mesg("Not enough core to resize the Heap/Local Stack! (current: %"Intfmt"; resize %"Intfmt")",
	  //   glstack.size*K,new_size_in_bytes);
	  return 1; /* return an error output -- will be picked up later */
	}
      } else {
	xsb_mesg("Not enough core to resize the Heap and Local Stack! (%" Intfmt ")",new_size_in_bytes);
	return 1; /* return an error output -- will be picked up later */
      }
    }
    //    printf("realloced heap %d -> %d\n",glstack.size,new_size);
    heap_offset = new_heap_bot - heap_bot ;
    new_ls_bot = new_heap_bot + new_size_in_cells - 1 ;
    local_offset = new_ls_bot - ls_bot ;

#if defined(GENERAL_TAGGING)
    //  printf("glstack expand %p %p\n",(void *)new_heap_bot,(void *)new_ls_bot+1);
    extend_enc_dec_as_nec(new_heap_bot,new_ls_bot+1);
#endif

    memmove(ls_top + local_offset,             /* move to */
	    ls_top + heap_offset,              /* move from */
	    (ls_bot - ls_top + 1)*sizeof(Cell) );      /* number of bytes */
  }

  initialize_glstack(heap_top + heap_offset,ls_top+local_offset);

  /* TLS: below, the condition should not need to be commented out.
     If the heap expands, there should be no pointers from the heap into
     the local stack, so we shouldn't need to traverse the heap.
     However, the call-subsumption code actually copies the substitution
     factor from the CPS to the heap (I don't know why, but see the comment
     after the call to subsumptive_call_search() in slginsts_xsb_i.h),
     so substitution-factor pointers may point from the heap into the
     local stack.  The pointer update below therefore renders those
     heap-to-LS pointers harmless on glstack expansion.
  */

  /* Update the Heap links */
  //  if (heap_offset != 0) {
    for (cell_ptr = (CPtr *)(heap_top + heap_offset);
	 cell_ptr-- > (CPtr *)new_heap_bot;
	 )
      { reallocate_heap_or_ls_pointer(cell_ptr) ; }
    //  }

  /* Update the pointers in the Local Stack */
  for (cell_ptr = (CPtr *)(ls_top + local_offset);
       cell_ptr <= (CPtr *)new_ls_bot;
       cell_ptr++)
  { reallocate_heap_or_ls_pointer(cell_ptr) ; }

  /* Update the trailed variable pointers */
  for (cell_ptr = (CPtr *)top_of_trail - 1;
       cell_ptr > (CPtr *)tcpstack.low;
       cell_ptr = cell_ptr - 2)
  { /* first the value */
    reallocate_heap_or_ls_pointer(cell_ptr);
    /* now the address */
    cell_ptr-- ;
    cell_val = (Cell)*cell_ptr ;
#ifdef PRE_IMAGE_TRAIL
    if ((size_t) cell_val & PRE_IMAGE_MARK) {
      /* remove tag */
      cell_val = (Cell) ((Cell) cell_val & ~PRE_IMAGE_MARK);
      /* realloc and tag */
      realloc_ref_pre_image(cell_ptr,(CPtr)cell_val) ;
      cell_ptr--;
      /* realoc pre-image */
      reallocate_heap_or_ls_pointer(cell_ptr);
    } else
#endif
      realloc_ref(cell_ptr,(CPtr)cell_val) ;
  }

  /* Update the CP Stack pointers */
  for (cell_ptr = (CPtr *)top_of_cpstack;
       cell_ptr < (CPtr *)tcpstack.high;
       cell_ptr++)
  { reallocate_heap_or_ls_pointer(cell_ptr) ; }

  /* Update the argument registers */
  while (arity)
  { cell_ptr = (CPtr *)(reg+arity) ;
    reallocate_heap_or_ls_pointer(cell_ptr) ;
    arity-- ;  
  }

  i = 0;
  while (i < rnum_in_trieinstr_unif_stk) {
    cell_ptr = (CPtr *)(trieinstr_unif_stk+i);
    //    printf(" reallocate trieinstr_unif_stk[%d]=%p\n",i,cell_ptr);
    reallocate_heap_or_ls_pointer(cell_ptr) ;
    i++;
  }

  /* Update the system variables */
  glstack.low = (byte *)new_heap_bot ;
  glstack.high = (byte *)(new_ls_bot + 1) ;
  pspace_tot_gl = pspace_tot_gl + (new_size - glstack.size)*K;
  glstack.size = new_size ;

  hreg = (CPtr)hreg + heap_offset ;
  hbreg = (CPtr)hbreg + heap_offset ;
  hfreg = (CPtr)hfreg + heap_offset ;
  ereg = (CPtr)ereg + local_offset ;
  ebreg = (CPtr)ebreg + local_offset ;
  efreg = (CPtr)efreg + local_offset ;

  if (islist(delayreg))
    delayreg = (CPtr)makelist(clref_val(delayreg) + heap_offset);

  expandtime = cpu_time() - expandtime;

  xsb_dbgmsg((LOG_REALLOC,"\tNew Bottom:\t%p\t\tNew Size: %" Intfmt "K",
	     glstack.low, glstack.size));
  xsb_dbgmsg((LOG_REALLOC,"\tNew Top:\t%p", glstack.high));
  xsb_dbgmsg((LOG_REALLOC,
	     "Heap/Local Stack data area expansion - finished in %lf secs\n",
	     expandtime));

  return 0;
} /* glstack_realloc */
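The loops in glstack_realloc above funnel every pointer fix-up through reallocate_heap_or_ls_pointer. The core rebasing idea is sketched below with XSB's cell-tag decoding omitted and the Cell/CPtr types assumed: a value that used to point into the old heap is shifted by heap_offset cells, one that pointed into the old local stack by local_offset cells, and anything else is left untouched.

#include <stddef.h>

typedef size_t Cell;              /* assumed: one machine word per cell */
typedef Cell  *CPtr;

/* Hedged sketch only; the real macro also inspects XSB's cell tags to decide
   whether a given cell holds a pointer at all before rewriting it. */
static CPtr rebase_pointer(CPtr p,
                           CPtr old_heap_bot, CPtr old_heap_top,
                           CPtr old_ls_top,   CPtr old_ls_bot,
                           ptrdiff_t heap_offset, ptrdiff_t local_offset)
{
  if (p >= old_heap_bot && p <= old_heap_top)
    return p + heap_offset;       /* pointed into the heap: follow the heap move   */
  if (p >= old_ls_top && p <= old_ls_bot)
    return p + local_offset;      /* pointed into the local stack: follow its move */
  return p;                       /* trail, CP stack, etc.: leave unchanged        */
}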
Example #20
int gc_heap(int arity)
{
#ifdef GC
  CPtr p;
  unsigned long  begin_marktime, end_marktime,
    end_slidetime, end_copy_time;
  int  marked = 0, marked_dregs = 0, i;
  int  start_heap_size;
  DECL_GC_PROFILE;
  
  INIT_GC_PROFILE;
  if (flags[GARBAGE_COLLECT] != NO_GC) {
    num_gc++ ;
    GC_PROFILE_PRE_REPORT;
    slide = (flags[GARBAGE_COLLECT] == SLIDING_GC) | 
      (flags[GARBAGE_COLLECT] == INDIRECTION_SLIDE_GC);
    
    if (fragmentation_only) 
      slide = FALSE;
    heap_early_reset = ls_early_reset = 0;
    
    GC_PROFILE_START_SUMMARY;
    
    begin_marktime = cpu_time();
    start_heap_size = hreg+1-(CPtr)glstack.low;
    
    /* make sure the top choice point heap pointer 
       that might not point into heap, does */
    if (hreg == cp_hreg(breg)) {
      *hreg = makeint(666) ;
      hreg++ ;
    }
#ifdef SLG_GC
    /* same for the freeze heap pointer */
    if (hfreg == hreg && hreg == cp_hreg(bfreg)) {
      *hreg = makeint(66600);
      hreg++;
    }
#endif
    
    /* copy the aregs to the top of the heap - only if sliding */
    /* just hope there is enough space */
    /* this happens best before the stack_boundaries are computed */
    if (slide) {
      if (delayreg != NULL) {
	arity++;
	reg[arity] = (Cell)delayreg;
      }
      for (i = 1; i <= arity; i++) {
	*hreg = reg[i];
	hreg++;
      }
    }
    
#ifdef SLG_GC
    /* in SLGWAM, copy hfreg to the heap */
    if (slide) {
      *hreg = (unsigned long) hfreg;
      hreg++;
    }
#endif

    marked = mark_heap(arity, &marked_dregs);
    
    end_marktime = cpu_time();
    
    if (fragmentation_only) {
      /* fragmentation is expressed as ratio not-marked/total heap in use
	 this is internal fragmentation only.  we print marked and total,
	 so that postprocessing can do what it wants with this info. */
      xsb_dbgmsg((LOG_GC, "marked_used_missed(%d,%d,%d,%d).",
		 marked,hreg+1-(CPtr)glstack.low,
		 heap_early_reset,ls_early_reset));
    free_marks:
      /* get rid of the marking areas - if they exist */
      if (heap_marks)  { free((heap_marks-1)); heap_marks = NULL; }
      if (tr_marks)    { free(tr_marks); tr_marks = NULL; }
      if (ls_marks)    { free(ls_marks); ls_marks = NULL; }
      if (cp_marks)    { free(cp_marks); cp_marks = NULL; }
      goto end;
    }
    
    GC_PROFILE_MARK_SUMMARY;
    
    /* An attempt to add some gc/expansion policy;
       ideally this should be user-controlled */
#if (! defined(GC_TEST))
    if (marked > ((hreg+1-(CPtr)glstack.low)*mark_threshold))
      {
	GC_PROFILE_QUIT_MSG;
        if (slide)
          hreg -= arity;
	total_time_gc += (double) 
	  (end_marktime-begin_marktime)*1000/CLOCKS_PER_SEC;
        goto free_marks; /* clean-up temp areas and get out of here... */
      }
#endif
    
    total_collected += (start_heap_size - marked);

    if (slide)
      {
	GC_PROFILE_SLIDE_START_TIME;

	hreg = slide_heap(marked) ;

	if (hreg != (heap_bot+marked))
	  xsb_dbgmsg((LOG_GC, "heap sliding gc - inconsistent hreg"));
#ifdef SLG_GC
	/* copy hfreg back from the heap */
	hreg--;
	hfreg = (unsigned long*) *hreg;
#endif

	/* copy the aregs from the top of the heap back */
	hreg -= arity;
	hbreg = cp_hreg(breg);
	
	p = hreg;
	
	for (i = 1; i <= arity; i++)
	  reg[i] = *p++ ;
	if (delayreg != NULL)
	  delayreg = (CPtr)reg[arity--];

	end_slidetime = cpu_time();
	
	total_time_gc += (double) 
	  (end_slidetime - begin_marktime)*1000/CLOCKS_PER_SEC;
	
	GC_PROFILE_SLIDE_FINAL_SUMMARY;
      }
    else
      { /* else we call the copying collector a la Cheney */
	CPtr begin_new_heap, end_new_heap;
	
	GC_PROFILE_COPY_START_TIME;
	
	begin_new_heap = (CPtr)malloc(marked*sizeof(Cell));
	if (begin_new_heap == NULL)
	  xsb_exit("copying garbage collection could not allocate new heap");
	end_new_heap = begin_new_heap+marked;

	hreg = copy_heap(marked,begin_new_heap,end_new_heap,arity);

	free(begin_new_heap);
	adapt_hfreg_from_choicepoints(hreg);
	hbreg = cp_hreg(breg);

#ifdef SLG_GC
	hfreg = hreg;
#endif
	end_copy_time = cpu_time();
	
	total_time_gc += (double) 
	  (end_copy_time - begin_marktime)*1000/CLOCKS_PER_SEC;
	
	GC_PROFILE_COPY_FINAL_SUMMARY;
      }
    
    if (print_on_gc) print_all_stacks(arity);
    
    /* get rid of the marking areas - if they exist */
    if (heap_marks)  { 
      check_zero(heap_marks,(heap_top - heap_bot),"heap") ;
      free((heap_marks-1)) ; /* see its calloc */
      heap_marks = NULL ;
    }
    if (tr_marks)    { 
      check_zero(tr_marks,(tr_top - tr_bot + 1),"tr") ;
      free(tr_marks) ;
      tr_marks = NULL ;
    }
    if (ls_marks)    { 
      check_zero(ls_marks,(ls_bot - ls_top + 1),"ls") ;
      free(ls_marks) ;
      ls_marks = NULL ;
    }
    if (cp_marks)    {  
      check_zero(cp_marks,(cp_bot - cp_top + 1),"cp") ;
      free(cp_marks) ;
      cp_marks = NULL ;
    }
#ifdef SAFE_GC
    p = hreg;
    while (p < heap_top)
      *p++ = 0;
#endif
    
  } /* if (flags[GARBAGE_COLLECT]) */
#else
  /* for no-GC, there is no gc, but stack expansion can be done */
#endif
  
#ifdef GC
 end:
  
  GC_PROFILE_POST_REPORT;
  
#endif /* GC */
  return(TRUE);

} /* gc_heap */
Example #21
xsbBool glstack_realloc(int new_size, int arity)
{
  CPtr   new_heap_bot ;       /* bottom of new Global Stack area */
  CPtr   new_ls_bot ;         /* bottom of new Local Stack area */

  long   heap_offset ;        /* offsets between the old and new */
  long   local_offset ;       /* stack bottoms, measured in Cells */

  CPtr   *cell_ptr ;
  Cell   cell_val ;

  size_t new_size_in_bytes, new_size_in_cells ; /* what a mess ! */
  long   expandtime ;

  if (new_size <= glstack.size) return 0;

  xsb_dbgmsg((LOG_REALLOC, 
	     "Reallocating the Heap and Local Stack data area"));
#ifdef DEBUG_VERBOSE
  if (LOG_REALLOC <= cur_log_level) {
    if (glstack.size == glstack.init_size) {
      xsb_dbgmsg((LOG_REALLOC,"\tBottom:\t\t%p\t\tInitial Size: %ldK",
		 glstack.low, glstack.size));
      xsb_dbgmsg((LOG_REALLOC,"\tTop:\t\t%p", glstack.high));
    }
  }
#endif

  expandtime = (long)(1000*cpu_time()) ;

  new_size_in_bytes = new_size*K ;
  new_size_in_cells = new_size_in_bytes/sizeof(Cell) ;
  		/* and let's hope K stays divisible by sizeof(Cell) */

  stack_boundaries ;

  /* Expand the data area and push the Local Stack to the high end. */

  new_heap_bot = (CPtr)realloc(heap_bot, new_size_in_bytes);
  if (new_heap_bot == NULL) {
    xsb_mesg("Not enough core to resize the Heap and Local Stack!");
    return 1; /* return an error output -- will be picked up later */
  }
  heap_offset = new_heap_bot - heap_bot ;
  new_ls_bot = new_heap_bot + new_size_in_cells - 1 ;
  local_offset = new_ls_bot - ls_bot ;
  memmove(ls_top + local_offset,             /* move to */
	  ls_top + heap_offset,              /* move from */
	  (ls_bot - ls_top + 1)*sizeof(Cell) );      /* number of bytes */

  /* Update the Heap links */
  for (cell_ptr = (CPtr *)(heap_top + heap_offset);
       cell_ptr-- > (CPtr *)new_heap_bot;
      )
  { reallocate_heap_or_ls_pointer(cell_ptr) ; }

  /* Update the pointers in the Local Stack */
  for (cell_ptr = (CPtr *)(ls_top + local_offset);
       cell_ptr <= (CPtr *)new_ls_bot;
       cell_ptr++)
  { reallocate_heap_or_ls_pointer(cell_ptr) ; }

  /* Update the trailed variable pointers */
  for (cell_ptr = (CPtr *)top_of_trail - 1;
       cell_ptr > (CPtr *)tcpstack.low;
       cell_ptr = cell_ptr - 2)
  { /* first the value */
    reallocate_heap_or_ls_pointer(cell_ptr);
    /* now the address */
    cell_ptr-- ;
    cell_val = (Cell)*cell_ptr ;
    realloc_ref(cell_ptr,(CPtr)cell_val) ;
  }

  /* Update the CP Stack pointers */
  for (cell_ptr = (CPtr *)top_of_cpstack;
       cell_ptr < (CPtr *)tcpstack.high;
       cell_ptr++)
  { reallocate_heap_or_ls_pointer(cell_ptr) ; }

  /* Update the argument registers */
  while (arity)
  { cell_ptr = (CPtr *)(reg+arity) ;
    reallocate_heap_or_ls_pointer(cell_ptr) ;
    arity-- ;  
  }

  /* Update the system variables */
  glstack.low = (byte *)new_heap_bot ;
  glstack.high = (byte *)(new_ls_bot + 1) ;
  glstack.size = new_size ;

  hreg = (CPtr)hreg + heap_offset ;
  hbreg = (CPtr)hbreg + heap_offset ;
  hfreg = (CPtr)hfreg + heap_offset ;
  ereg = (CPtr)ereg + local_offset ;
  ebreg = (CPtr)ebreg + local_offset ;
  efreg = (CPtr)efreg + local_offset ;

  if (islist(delayreg))
    delayreg = (CPtr)makelist(clref_val(delayreg) + heap_offset);

  expandtime = (long)(1000*cpu_time()) - expandtime;

  xsb_dbgmsg((LOG_REALLOC,"\tNew Bottom:\t%p\t\tNew Size: %ldK",
	     glstack.low, glstack.size));
  xsb_dbgmsg((LOG_REALLOC,"\tNew Top:\t%p", glstack.high));
  xsb_dbgmsg((LOG_REALLOC,
	     "Heap/Local Stack data area expansion - finished in %ld msecs\n",
	     expandtime));

  return 0;
} /* glstack_realloc */