Example 1
static const char* barrier_get_typename(struct barrier_info* const p)
{
   tl_assert(p);

   return barrier_type_name(p->barrier_type);
}
Example 2
/** Return true if the specified mutex is locked by any thread. */
static Bool mutex_is_locked(struct mutex_info* const p)
{
   tl_assert(p);
   return (p->recursion_count > 0);
}
Example 3
void DRD_(mutex_set_trace)(const Bool trace_mutex)
{
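   /* '!!' canonicalizes any nonzero value to 1, so this asserts that
      trace_mutex holds a genuine Bool (0 or 1). */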
   tl_assert(!! trace_mutex == trace_mutex);
   s_trace_mutex = trace_mutex;
}
Example 4
/**
 * Wrapper for realloc(). Returns a pointer to the new block of memory, or
 * NULL if a new block could not be allocated. Notes:
 * - realloc(NULL, size) has the same effect as malloc(size).
 * - realloc(p, 0) has the same effect as free(p).
 * - success is not guaranteed even if the requested size is smaller than the
 *   allocated size.
 */
static void* drd_realloc(ThreadId tid, void* p_old, SizeT new_size)
{
   DRD_Chunk* mc;
   void*      p_new;
   SizeT      old_size;

   if (! p_old)
      return drd_malloc(tid, new_size);

   if (new_size == 0)
   {
      drd_free(tid, p_old);
      return NULL;
   }

   s_cmalloc_n_mallocs++;
   s_cmalloc_n_frees++;
   s_cmalloc_bs_mallocd += new_size;

   mc = VG_(HT_lookup)(s_malloc_list, (UWord)p_old);
   if (mc == NULL)
   {
      tl_assert(0);
      return NULL;
   }

   old_size = mc->size;

   if (old_size == new_size)
   {
      /* size unchanged */
      mc->where = VG_(record_ExeContext)(tid, 0);
      p_new = p_old;
   }
   else if (new_size < old_size)
   {
      /* new size is smaller but nonzero */
      s_stop_using_mem_callback(mc->data + new_size, old_size - new_size);
      mc->size = new_size;
      mc->where = VG_(record_ExeContext)(tid, 0);
      p_new = p_old;
   }
   else
   {
      /* new size is bigger */
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_size);

      if (p_new)
      {
         /* Copy from old to new. */
         VG_(memcpy)(p_new, p_old, mc->size);
         
         /* Free old memory. */
         VG_(cli_free)(p_old);
         if (mc->size > 0)
            s_stop_using_mem_callback(mc->data, mc->size);
         VG_(HT_remove)(s_malloc_list, (UWord)p_old);

         /* Update state information. */
         mc->data  = (Addr)p_new;
         mc->size  = new_size;
         mc->where = VG_(record_ExeContext)(tid, 0);
         VG_(HT_add_node)(s_malloc_list, mc);
         s_start_using_mem_callback((Addr)p_new, new_size, 0/*ec_uniq*/);
      }
      else
      {
         /* Allocation failed -- leave original block untouched. */
      }
   }  

   return p_new;
}
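
The three branches above mirror the contract of the C standard realloc(), as
documented in the comment at the top. A minimal standalone sketch of that
contract in plain C (no Valgrind internals; main() and the sizes are invented
for illustration):

#include <stdlib.h>
#include <string.h>
#include <assert.h>

int main(void)
{
   /* realloc(NULL, size) behaves like malloc(size). */
   char* p = realloc(NULL, 16);
   assert(p);
   strcpy(p, "hello");

   /* Growing may move the block; on failure the old block is untouched. */
   char* q = realloc(p, 1024);
   if (!q) {
      free(p);   /* p is still valid and still owned by the caller */
      return 1;
   }

   /* realloc(q, 0) frees the block in the model documented above (note
      that since C17 the size-zero case is implementation-defined). */
   void* r = realloc(q, 0);
   free(r);      /* no-op if r came back NULL */
   return 0;
}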
Example 5
/**
 * Update mutex_info state when unlocking the pthread_mutex_t mutex.
 *
 * @param[in] mutex      Address of the client mutex.
 * @param[in] mutex_type Mutex type.
 *
 * @return New value of the mutex recursion count.
 *
 * @note This function must be called before pthread_mutex_unlock() is called,
 *       or a race condition is triggered!
 */
void DRD_(mutex_unlock)(const Addr mutex, MutexT mutex_type)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   const ThreadId vg_tid = VG_(get_running_tid)();
   struct mutex_info* p;

   p = DRD_(mutex_get)(mutex);
   if (p && mutex_type == mutex_type_unknown)
      mutex_type = p->mutex_type;

   if (s_trace_mutex) {
      DRD_(trace_msg)("[%d] mutex_unlock    %s 0x%lx rc %d",
                      drd_tid, p ? DRD_(mutex_get_typename)(p) : "(?)",
                      mutex, p ? p->recursion_count : 0);
   }

   if (p == 0 || mutex_type == mutex_type_invalid_mutex)
   {
      DRD_(not_a_mutex)(mutex);
      return;
   }

   if (p->owner == DRD_INVALID_THREADID)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "Mutex not locked",
                              &MEI);
      return;
   }

   tl_assert(p);
   if (p->mutex_type != mutex_type) {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid, MutexErr, VG_(get_IP)(vg_tid),
                              "Mutex type changed", &MEI);
   }
   tl_assert(p->mutex_type == mutex_type);
   tl_assert(p->owner != DRD_INVALID_THREADID);

   if (p->owner != drd_tid || p->recursion_count <= 0)
   {
      MutexErrInfo MEI = { DRD_(thread_get_running_tid)(),
                           p->a1, p->recursion_count, p->owner };
      VG_(maybe_record_error)(vg_tid,
                              MutexErr,
                              VG_(get_IP)(vg_tid),
                              "Mutex not locked by calling thread",
                              &MEI);
      return;
   }
   tl_assert(p->recursion_count > 0);
   p->recursion_count--;
   tl_assert(p->recursion_count >= 0);

   if (p->recursion_count == 0)
   {
      if (s_mutex_lock_threshold_ms > 0)
      {
         Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
         if (held > s_mutex_lock_threshold_ms)
         {
            HoldtimeErrInfo HEI
               = { DRD_(thread_get_running_tid)(),
                   mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
            VG_(maybe_record_error)(vg_tid,
                                    HoldtimeErr,
                                    VG_(get_IP)(vg_tid),
                                    "mutex",
                                    &HEI);
         }
      }

      /* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
      /* current vector clock of the thread such that it is available when  */
      /* this mutex is locked again.                                        */

      DRD_(thread_get_latest_segment)(&p->last_locked_segment, drd_tid);
      if (!p->ignore_ordering)
         DRD_(thread_new_segment)(drd_tid);
      p->acquired_at = 0;
      s_mutex_segment_creation_count++;
   }
}
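
The ordering requirement in the @note above is visible in the client-request
handler of Example 21 below: VG_USERREQ__PRE_MUTEX_UNLOCK calls
DRD_(mutex_unlock) before the real unlock takes place. A sketch of that
interception pattern (the wrapper and the notify helper are invented; the
real interceptors use Valgrind's redirection machinery, not a plain wrapper):

int wrapped_pthread_mutex_unlock(pthread_mutex_t* mutex)
{
   /* Tell the tool first, while the mutex is still held (this maps to
      VG_USERREQ__PRE_MUTEX_UNLOCK, which ends up in DRD_(mutex_unlock)). */
   notify_pre_mutex_unlock(mutex);   /* hypothetical helper */
   /* Only then perform the real unlock; the reverse order opens the race
      the @note warns about. */
   return pthread_mutex_unlock(mutex);
}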
Example 6
void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
{
   MC_Chunk* old_mc;
   MC_Chunk* new_mc;
   Addr      a_new; 
   SizeT     old_szB;

   if (MC_(record_fishy_value_error)(tid, "realloc", "size", new_szB))
      return NULL;

   cmalloc_n_frees ++;
   cmalloc_n_mallocs ++;
   cmalloc_bs_mallocd += (ULong)new_szB;

   /* Remove the old block */
   old_mc = VG_(HT_remove) ( MC_(malloc_list), (UWord)p_old );
   if (old_mc == NULL) {
      MC_(record_free_error) ( tid, (Addr)p_old );
      /* We return to the program regardless. */
      return NULL;
   }

   /* check if it's a matching free() / delete / delete [] */
   if (MC_AllocMalloc != old_mc->allockind) {
      /* cannot realloc a range that was allocated with new or new [] */
      tl_assert((Addr)p_old == old_mc->data);
      record_freemismatch_error ( tid, old_mc );
      /* but keep going anyway */
   }

   old_szB = old_mc->szB;

   /* Get new memory */
   a_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_szB);

   if (a_new) {
      /* In all cases, even when the new size is smaller or unchanged, we
         reallocate and copy the contents, and make the old block
         inaccessible.  This is so as to guarantee to catch all cases of
         accesses via the old address after reallocation, regardless of
         the change in size.  (Of course the ability to detect accesses
         to the old block also depends on the size of the freed blocks
         queue). */

      // Allocate a new chunk.
      new_mc = create_MC_Chunk( tid, a_new, new_szB, MC_AllocMalloc );

      // Now insert the new mc (with a new 'data' field) into malloc_list.
      VG_(HT_add_node)( MC_(malloc_list), new_mc );

      /* Retained part is copied, red zones set as normal */

      /* Redzone at the front */
      MC_(make_mem_noaccess)( a_new-MC_(Malloc_Redzone_SzB), 
                              MC_(Malloc_Redzone_SzB) );

      /* payload */
      if (old_szB >= new_szB) {
         /* new size is smaller or the same */

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, new_szB );
         VG_(memcpy)((void*)a_new, p_old, new_szB);
      } else {
         /* new size is bigger */
         UInt        ecu;

         /* Copy address range state and value from old to new */
         MC_(copy_address_range_state) ( (Addr)p_old, a_new, old_szB );
         VG_(memcpy)((void*)a_new, p_old, old_szB);

         // If the block has grown, we mark the grown area as undefined.
         // We have to do that after VG_(HT_add_node) to ensure the ecu
         // execontext is for a fully allocated block.
         ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
         tl_assert(VG_(is_plausible_ECU)(ecu));
         MC_(make_mem_undefined_w_otag)( a_new+old_szB,
                                         new_szB-old_szB,
                                         ecu | MC_OKIND_HEAP );

         /* Possibly fill new area with specified junk */
         if (MC_(clo_malloc_fill) != -1) {
            tl_assert(MC_(clo_malloc_fill) >= 0x00
                      && MC_(clo_malloc_fill) <= 0xFF);
            VG_(memset)((void*)(a_new+old_szB), MC_(clo_malloc_fill), 
                                                new_szB-old_szB);
         }
      }

      /* Redzone at the back. */
      MC_(make_mem_noaccess)        ( a_new+new_szB, MC_(Malloc_Redzone_SzB));

      /* Possibly fill freed area with specified junk. */
      if (MC_(clo_free_fill) != -1) {
         tl_assert(MC_(clo_free_fill) >= 0x00 && MC_(clo_free_fill) <= 0xFF);
         VG_(memset)((void*)p_old, MC_(clo_free_fill), old_szB);
      }

      /* Free old memory */
      /* Nb: we have to allocate a new MC_Chunk for the new memory rather
         than recycling the old one, so that any erroneous accesses to the
         old memory are reported. */
      die_and_free_mem ( tid, old_mc, MC_(Malloc_Redzone_SzB) );

   } else {
      /* Could not allocate new client memory.
         Re-insert the old_mc (with the old ptr) in the HT, as old_mc was
         unconditionally removed at the beginning of the function. */
      VG_(HT_add_node)( MC_(malloc_list), old_mc );
   }

   return (void*)a_new;
}
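
Because the block always moves (see the comment at the top of the success
branch), later accesses through the old pointer land in freed memory and can
be reported. A small client program of the kind this policy catches (invented
for illustration; under a plain libc a same-size realloc might not move the
block, which is exactly why the tool forces the move):

#include <stdlib.h>

int main(void)
{
   char* old = malloc(8);
   if (!old) return 1;
   char* new = realloc(old, 8);   /* same size, but the tool still moves it */
   if (!new) { free(old); return 1; }
   old[0] = 'x';   /* bug: access via the old address -- reportable, since
                      the old block was made inaccessible and queued */
   new[0] = 'x';   /* correct access via the new address */
   free(new);
   return 0;
}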
Example 7
static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
{
    tl_assert(tid == VG_(get_running_tid)());
    DRD_(thread_set_vg_running_tid)(tid);
}
Example 8
static
IRSB* fr_instrument(VgCallbackClosure* closure,
                    IRSB* sbIn,
                    VexGuestLayout* layout, 
                    VexGuestExtents* vge,
                    IRType gWordTy, IRType hWordTy)
{
   Int        i;
   IRSB*      sbOut;
   IRTypeEnv* tyenv = sbIn->tyenv;
   IRDirty*   di;
   IRType     dataTy;
   IRExpr**   argv;
   IRCAS*     cas;

   // We don't care about mmaps
   if (!clo_mmap)
      return sbIn;

   // From lackey tool
   tl_assert(gWordTy == hWordTy);

   sbOut = deepCopyIRSBExceptStmts(sbIn);

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < sbIn->stmts_used && sbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( sbOut, sbIn->stmts[i] );
      i++;
   }

   for (/*use current i*/; i < sbIn->stmts_used; i++) {
      IRStmt* st = sbIn->stmts[i];
      if (!st || st->tag == Ist_NoOp) continue;

      switch (st->tag) {
         case Ist_NoOp: // Make compiler happy
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_MBE:
         case Ist_IMark:
         case Ist_WrTmp:
         case Ist_Exit:
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_Store:
            dataTy = typeOfIRExpr( tyenv, st->Ist.Store.data );
            argv   = mkIRExprVec_2( st->Ist.Store.addr, mkIRExpr_HWord( sizeofIRType( dataTy ) ) );
            di     = unsafeIRDirty_0_N(/*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv);
            addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_LLSC:
            if (st->Ist.LLSC.storedata != NULL) {
               dataTy = typeOfIRExpr( tyenv, st->Ist.LLSC.storedata );
               argv   = mkIRExprVec_2( st->Ist.LLSC.addr, mkIRExpr_HWord( sizeofIRType( dataTy ) ) );
               di     = unsafeIRDirty_0_N(/*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv);
               addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               addStmtToIRSB( sbOut, st );
            }
            break;

         case Ist_Dirty:
            di = st->Ist.Dirty.details;
            if (di->mFx != Ifx_None) {
               // This dirty helper accesses memory.  Collect the details.
               tl_assert(di->mAddr != NULL);
               tl_assert(di->mSize != 0);
               if (di->mFx == Ifx_Write || di->mFx == Ifx_Modify) {
                  argv = mkIRExprVec_2( di->mAddr, mkIRExpr_HWord( di->mSize ) );
                  di   = unsafeIRDirty_0_N( /*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv );
                  addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
               }
            } else {
               tl_assert(di->mAddr == NULL);
               tl_assert(di->mSize == 0);
            }
            addStmtToIRSB( sbOut, st );
            break;

         case Ist_CAS:
            cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            argv = mkIRExprVec_2( cas->addr, mkIRExpr_HWord( sizeofIRType(typeOfIRExpr(tyenv, cas->dataLo)) * (cas->dataHi != NULL ? 2 : 1) ) );
            di   = unsafeIRDirty_0_N( /*regparms*/2, "trace_store", VG_(fnptr_to_fnentry)( trace_store ), argv );
            addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
            addStmtToIRSB( sbOut, st );
            break;
      }
   }

   return sbOut;
}
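
Every instrumented store funnels into a helper called trace_store that takes
the effective address and the store size, as the mkIRExprVec_2 calls show.
Its definition is not part of this excerpt, so the following is only a sketch
consistent with the two-argument regparms(2) dirty calls above (the body is
invented):

static VG_REGPARM(2) void trace_store(Addr addr, SizeT size)
{
   /* Invented body: log that 'size' bytes were written at 'addr'. The
      real tool presumably updates its mmap page tracking here instead. */
   VG_(printf)("store at 0x%lx, %lu bytes\n", addr, (UWord)size);
}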
Example 9
// Returns an array or struct variable within varList
// that encompasses the address provided by "a".
// Properties for return value r = &(returnNode.var):
// location(r) <= "a" < location(r) + (r->upperBounds[0] * getBytesBetweenElts(r))
//   [if array]
// location(r) <= "a" < location(r) + (getBytesBetweenElts(r))
//   [if struct]
// where location(.) is the global location if isGlobal and stack location
// based on FP or SP if !isGlobal
// *baseAddr = the base address of the variable returned
static VariableEntry*
returnArrayVariableWithAddr(VarList* varList,
                            Addr a,
                            char isGlobal,
                            FunctionExecutionState* e,
                            Addr* baseAddr) {
  VarNode* cur_node = 0;
  ThreadId tid = VG_(get_running_tid)();
  Addr var_loc = 0;

  FJALAR_DPRINTF("[returnArrayVariableWithAddr] varList: %p, Addr: %p, %s\n", varList, (void *)a, (isGlobal)?"Global":"NonGlobal");
  if (!isGlobal) {
    FJALAR_DPRINTF("frame_ptr: %p, stack_ptr: %p\n", (void *)e->FP, (void *)e->lowSP);
  }

  for (cur_node = varList->first;
       cur_node != 0;
       cur_node = cur_node->next) {
    VariableEntry* potentialVar = cur_node->var;
    Addr potentialVarBaseAddr = 0;

    if (!potentialVar)
      continue;

    FJALAR_DPRINTF("Examining potential var: %s, offset: 0x%x, locType: 0x%x\n",
                    potentialVar->name, potentialVar->byteOffset, potentialVar->locationType);

    if (isGlobal) {
      tl_assert(IS_GLOBAL_VAR(potentialVar));
      potentialVarBaseAddr = potentialVar->globalVar->globalLocation;
      FJALAR_DPRINTF("Examining potential var address: %p\n",(void *) potentialVarBaseAddr);
    } else {
      if (potentialVar->locationType == FP_OFFSET_LOCATION) {
        potentialVarBaseAddr = e->FP + potentialVar->byteOffset;
      } else {  
        // (comment added 2014)  
        // Potential bug!  We are ignoring other locationTypes
        // and just assuming it is ESP.  This is the only case
        // we've seen (i386 only) so far.  (markro)
        potentialVarBaseAddr = e->lowSP + potentialVar->byteOffset;
      }
    }
    if (potentialVar->location_expression_size) {
      unsigned int i = 0;
      for(i = 0; i < potentialVar->location_expression_size; i++ ) {
        dwarf_location *dloc  = &(potentialVar->location_expression[i]);
        unsigned int  op = dloc->atom;
        int reg_val;

        if(op == DW_OP_addr) {
          // DWARF supplied address
          var_loc = dloc->atom_offset;

        } else if(op == DW_OP_deref) {
          // Dereference result of last DWARF operation
          tl_assert(var_loc);
          var_loc = *(Addr *)var_loc;

        } else if((op >= DW_OP_const1u) && (op <= DW_OP_consts)) {
          // DWARF supplied constant
          var_loc = dloc->atom_offset;

        } else if((op >= DW_OP_plus) && (op <= DW_OP_plus_uconst)) {
          // Add DWARF supplied constant to value to result of last DWARF operation
          var_loc += dloc->atom_offset;

        } else if((op >= DW_OP_reg0) && (op <= DW_OP_reg31)) {
          // Get value located in architectural register
          reg_val = (*get_reg[dloc->atom - DW_OP_reg0])(tid);
          FJALAR_DPRINTF("\tObtaining register value: [%%%s]: %x\n", dwarf_reg_string[dloc->atom - DW_OP_reg0], reg_val);
          var_loc = (Addr)&reg_val;

        } else if((op >= DW_OP_breg0) && (op <= DW_OP_breg31)) {
          // Get value pointed to by architectural register
          reg_val = (*get_reg[dloc->atom - DW_OP_breg0])(tid);
          FJALAR_DPRINTF("\tObtaining register value: [%%%s]: %x\n", dwarf_reg_string[dloc->atom - DW_OP_breg0], reg_val);
          var_loc = reg_val + dloc->atom_offset;
          FJALAR_DPRINTF("\tAdding %lld to the register value for %p\n", dloc->atom_offset, (void *)var_loc);
          tl_assert(var_loc);

        } else if(op == DW_OP_fbreg) {
          // Get value located at an offset from the FRAME_BASE.
          FJALAR_DPRINTF("atom offset: %lld vs. byteOffset: %d\n", dloc->atom_offset, potentialVar->byteOffset);
          var_loc = e->FP + dloc->atom_offset;

        } else {
          // There's a fair number of DWARF operations still unsupported. There is a full list
          // in fjalar_debug.h
          FJALAR_DPRINTF("\tUnsupported DWARF stack OP: %s\n", location_expression_to_string(op));
          tl_assert(0);
        }
        FJALAR_DPRINTF("\tApplying DWARF Stack Operation %s - %p\n",location_expression_to_string(op), (void *)var_loc);
      }
    }
    FJALAR_DPRINTF("addr: %p, potential var_loc: %p, staticArr: %p, ptrLevels: %d, varType: %d\n",
                   (void*)a, (void*)potentialVarBaseAddr, potentialVar->staticArr, potentialVar->ptrLevels,
                   (potentialVar->varType ? potentialVar->varType->decType : 0));

    // array
    if (IS_STATIC_ARRAY_VAR(potentialVar) &&
        (potentialVarBaseAddr <= a) &&
        (a < (potentialVarBaseAddr + (potentialVar->staticArr->upperBounds[0] *
                                      getBytesBetweenElts(potentialVar))))) {

      FJALAR_DPRINTF("returnArrayVar: found matching array with upperBounds[0]: %d\n", potentialVar->staticArr->upperBounds[0]);
      *baseAddr = potentialVarBaseAddr;
      return potentialVar;
    }
    // struct
    else if (VAR_IS_BASE_STRUCT(potentialVar) &&
             (potentialVarBaseAddr <= a) &&
             (a < (potentialVarBaseAddr + getBytesBetweenElts(potentialVar)))) {
      return searchForArrayWithinStruct(potentialVar,
                                        potentialVarBaseAddr,
                                        a, baseAddr);
    }
  }

  *baseAddr = 0;
  return 0;
}
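
The DWARF loop above is a one-register stack machine: each operation reads
and/or overwrites the single accumulator var_loc. A worked trace with
invented operands:

/* Expression: DW_OP_fbreg -16, then DW_OP_deref.
   Step 1 (DW_OP_fbreg):  var_loc = e->FP + (-16)    -- a slot in the frame
   Step 2 (DW_OP_deref):  var_loc = *(Addr*)var_loc  -- follow the pointer
   stored in that slot, e.g. for a variable held behind a pointer. */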
Example 10
static Trace_Block* alloc_trace(ThreadId tid)
{
   static Addr ips[MAX_TRACE];
   static Trace_Hash* hash_entries[MAX_TRACE];
   Addr* ips_ptr;
   UInt n_ips, n_ips_count;
   Trace_Hash* hash_entry;
   Trace_Hash** hash_entry_ptr;
   Trace_Hash** max_skip;
   Trace_Block* parent;
   Trace_Block* block;

   n_ips = VG_(get_StackTrace)(tid, ips, clo_trace, NULL, NULL, 0);
   tl_assert(n_ips > 0);

   // Get first non-skipped block
   ips_ptr = ips;
   hash_entry_ptr = hash_entries;
   max_skip = NULL;
   n_ips_count = n_ips;
   do {
      hash_entry = VG_(HT_lookup)(trace_hash, *ips_ptr);
      if (!hash_entry) {
         hash_entry = VG_(malloc)("freya.alloc_trace.1", sizeof(Trace_Hash));
         hash_entry->ips = *ips_ptr;
         hash_entry->skip = 0;
         hash_entry->block = NULL;
         hash_entry->parent = NULL;
         check_address(*ips_ptr, hash_entry);
         VG_(HT_add_node)(trace_hash, hash_entry);
         block = NULL;
      }
      *hash_entry_ptr++ = hash_entry;
      if (hash_entry->skip)
         max_skip = hash_entry_ptr; // Which is one step ahead
      n_ips_count--;
      ips_ptr++;
   } while (n_ips_count > 0);

   ips_ptr = ips;
   hash_entry_ptr = hash_entries;
   if (max_skip) {
      if (max_skip - hash_entries == n_ips)
         max_skip--; // At least one should always remain
      n_ips -= max_skip - hash_entries;
      ips_ptr += max_skip - hash_entries;
      hash_entry_ptr += max_skip - hash_entries;
   }

   if (n_ips > clo_report)
      n_ips = clo_report;

   tl_assert(n_ips > 0);

   // Insert into the chain
   parent = (*hash_entry_ptr)->parent;
   do {
      hash_entry = *hash_entry_ptr++;
      block = hash_entry->block;
      while (block) {
         tl_assert(block->ips == *ips_ptr);
         if (block->parent == parent)
            break;
         block = block->hash_next;
      }

      if (!block) {
         block = VG_(malloc)("freya.alloc_trace.2", sizeof(Trace_Block));
         block->parent = parent;
         if (parent) {
            block->next = parent->first;
            parent->first = block;
         } else {
            block->next = trace_head;
            trace_head = block;
         }
         block->first = NULL;

         block->hash_next = hash_entry->block;
         hash_entry->block = block;

         block->allocs = 0;
         block->total = 0;
         block->current = 0;
         block->peak = 0;
         block->ips = *ips_ptr;
         block->name = NULL;
      }

      parent = block;
      n_ips--;
      ips_ptr++;
   } while (n_ips > 0);

   return block;
}
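
The fields this function touches imply the shape of the two structures. The
actual declarations are not in this excerpt, so the sketch below reconstructs
them from usage; the field types are best guesses:

typedef struct Trace_Hash  Trace_Hash;
typedef struct Trace_Block Trace_Block;

struct Trace_Hash {            /* one node per instruction pointer */
   UWord        ips;           /* key: the ip (first word, as VG_(HT_*) needs) */
   Bool         skip;          /* set by check_address() for skipped frames */
   Trace_Block* block;         /* chain of blocks created for this ip */
   Trace_Block* parent;        /* parent hint used while chaining */
};

struct Trace_Block {           /* one node of the allocation call tree */
   Trace_Block* parent;        /* the caller's block */
   Trace_Block* next;          /* next sibling */
   Trace_Block* first;         /* first child */
   Trace_Block* hash_next;     /* next block sharing the same ip */
   SizeT        allocs;        /* number of allocations */
   SizeT        total;         /* bytes allocated in total */
   SizeT        current;       /* live bytes right now */
   SizeT        peak;          /* high-water mark of 'current' */
   Addr         ips;           /* the ip this node represents */
   Char*        name;          /* group name from the config, or NULL */
};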
Example 11
// Nb: --ignore-fn is tricky for realloc.  If the block's original alloc was
// ignored, but the realloc is not requested to be ignored, and we are
// shrinking the block, then we have to ignore the realloc -- otherwise we
// could end up with negative heap sizes.  This isn't a danger if we are
// growing such a block, but for consistency (it also simplifies things) we
// ignore such reallocs as well.
static void* renew_block ( ThreadId tid, void* p_old, SizeT new_req_szB )
{
   HP_Chunk* hc;
   void*     p_new;
   SizeT     old_req_szB, old_slop_szB, new_slop_szB, new_actual_szB;
   Trace_Block* block;

   // Remove the old block
   hc = VG_(HT_remove)(malloc_list, (UWord)p_old);
   if (hc == NULL) {
      VG_(printf)("Invalid realloc: %p\n", p_old);
      return NULL;   // must have been a bogus realloc()
   }

   if (tid != hc->tid)
       cross_thread_free(tid, hc->block);

   block = hc->block;
   while (block) {
      block->current -= hc->req_szB;
      block->allocs ++;
      block->total += new_req_szB;
      block->current += new_req_szB;
      if (block->peak < block->current)
         block->peak = block->current;
      block = block->parent;
   }

   old_req_szB  = hc->req_szB;
   old_slop_szB = hc->slop_szB;

   // Actually do the allocation, if necessary.
   if (new_req_szB <= old_req_szB + old_slop_szB) {
      // New size is smaller or same;  block not moved.
      p_new = p_old;
      new_slop_szB = old_slop_szB + (old_req_szB - new_req_szB);

   } else {
      // New size is bigger;  make new block, copy shared contents, free old.
      p_new = VG_(cli_malloc)(VG_(clo_alignment), new_req_szB);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }
      VG_(memcpy)(p_new, p_old, old_req_szB);
      VG_(cli_free)(p_old);
      new_actual_szB = VG_(malloc_usable_size)(p_new);
      tl_assert(new_actual_szB >= new_req_szB);
      new_slop_szB = new_actual_szB - new_req_szB;
   }

   // Update HP_Chunk.
   hc->data     = (Addr)p_new;
   hc->req_szB  = new_req_szB;
   hc->slop_szB = new_slop_szB;
   hc->tid      = tid;

   // Now insert the new hc (with a possibly new 'data' field) into
   // malloc_list.
   VG_(HT_add_node)(malloc_list, hc);
   return p_new;
}
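
As above, the HP_Chunk layout can be read off the field accesses; this is a
reconstruction for reference, not the tool's actual declaration:

typedef struct {
   Addr         data;       /* address of the client block */
   SizeT        req_szB;    /* size the client asked for */
   SizeT        slop_szB;   /* extra usable bytes beyond req_szB */
   ThreadId     tid;        /* thread that (re)allocated the block */
   Trace_Block* block;      /* leaf of this allocation's trace tree */
} HP_Chunk;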
Example 12
static void fr_sort_and_dump(Trace_Block* block, Int indent)
{
   Int i;
   Trace_Block* from;
   Trace_Block* from_prev;
   Trace_Block* max;
   Trace_Block* max_prev;
   Trace_Block* it;
   Trace_Block* it_prev;

   tl_assert((!block->parent && trace_head == block) || (block->parent && block->parent->first == block));

   if (block->parent && block->next == NULL) {
      // One child, no need to sort
      if (block->peak < clo_min)
         return;

      for (i = 0; i < indent; ++i)
         VG_(printf)("  ");
      fr_print_block(block);

      if (block->first)
         fr_sort_and_dump(block->first, indent + 1);
      return;
   }

   // Sort siblings by peak, descending (selection sort)
   from_prev = NULL;
   from = block;
   while (from) {
      max_prev = NULL;
      max = from;
      it_prev = from;
      it = from->next;
      while (it) {
         if (it->peak > max->peak) {
            max_prev = it_prev;
            max = it;
         }
         it_prev = it;
         it = it->next;
      }

      if (max != from) {
         if (max != from->next) {
            tl_assert(max_prev != from);
            it = max->next;
            max->next = from->next;
            from->next = it;
            max_prev->next = from;
         } else {
            tl_assert(max_prev == from);
            from->next = max->next;
            max->next = from;
         }

         if (from_prev)
            from_prev->next = max;
         else {
            if (from->parent)
                from->parent->first = max;
            else
                trace_head = max;
            block = max;
         }
      }
      from_prev = max;
      from = max->next;
   }

   while (block) {
      if (block->peak < clo_min)
         return;

      for (i = 0; i < indent; ++i)
         VG_(printf)("  ");

      VG_(printf)("[%d] ", indent);
      fr_print_bytes("Peak: ", block->peak);
      VG_(printf)("Allocs: %d ", block->allocs);
      fr_print_bytes("Total: ", block->total);
      if (block->current > 0)
         fr_print_bytes("Leak: ", block->current);
      VG_(printf)("\n");

      for (i = 0; i < indent; ++i)
         VG_(printf)("  ");
      fr_print_block(block);

      if (block->first)
         fr_sort_and_dump(block->first, indent + 1);

      block = block->next;
   }
}
Example 13
static void fr_post_clo_init(void)
{
   Rule_List* last_rule_ptr = NULL;
   Char* read_ptr;
   Trace_Block* block = NULL;
   Trace_Block* parent = NULL;
   Int* indents = (int*)dir_buffer;
   Int indent;
   Int depth = -1;
   Bool is_group;
   SysRes sres;
   Int fd;
   OffT file_size;

   if (clo_mmap) {
#if VG_WORDSIZE == 4
      mmap_section.next = NULL;
      mmap_section.page_addr = 0;
      mmap_section.trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
      mmap_section.used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
#else
      mmap_sections = VG_(calloc)("freya.fr_post_clo_init.1", 1, sizeof(Mmap_Section));
      mmap_sections->next = NULL;
      mmap_sections->page_addr = 0;
      mmap_sections->trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
      mmap_sections->used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
      mmap_section_cache = mmap_sections;
#endif
   }

   read_ptr = NULL;
   if (clo_config) {
      sres = VG_(open)(clo_config, VKI_O_RDONLY, 0);
      if (!sr_isError(sres)) {
         fd = (Int) sr_Res(sres);

         file_size = VG_(lseek)(fd, 0, VKI_SEEK_END);
         VG_(lseek)(fd, 0, VKI_SEEK_SET);

         if (clo_fr_verb)
            VG_(printf)("File '%s' (size: %ld bytes) is successfully opened.\n", clo_config, file_size);

         read_ptr = VG_(malloc)("freya.fr_post_clo_init.3", (file_size + 1) * sizeof(Char));
         VG_(read)(fd, read_ptr, file_size);
         read_ptr[file_size] = '\0';

         VG_(close) (fd);
      }
      else if (clo_fr_verb)
         VG_(printf)("Cannot open '%s'. (Fallback to default config)\n", clo_config);
   }
   else if (clo_fr_verb)
      VG_(printf)("No config file provided. (Fallback to default config)\n");

   if (!read_ptr) {
      // No config file was loaded: fall back to a writable copy of default_rule
      read_ptr = VG_(malloc)("freya.fr_post_clo_init.4", (VG_(strlen)(default_rule) + 1) * sizeof(Char));
      VG_(strcpy)(read_ptr, default_rule);
   }

   while (*read_ptr) {
      // Parsing the next line, first skip spaces
      indent = 0;
      while (*read_ptr == ' ') {
         indent++;
         read_ptr++;
      }

      // Skip comments and empty lines
      if (*read_ptr == '#' || *read_ptr == '\r' || *read_ptr == '\n') {
         while (*read_ptr != '\0' && *read_ptr != '\r' && *read_ptr != '\n')
            read_ptr++;

         if (*read_ptr) {
            read_ptr++;
            continue;
         }
      }

      if (*read_ptr == '{') {
         read_ptr = parse_extra_rule(read_ptr, block);
         continue;
      } else if (*read_ptr != '[' && *read_ptr != '(') {
         read_ptr = parse_rule(read_ptr, &last_rule_ptr);
         continue;
      }

      is_group = *read_ptr == '[';

      block = VG_(malloc)("freya.fr_post_clo_init.4", sizeof(Trace_Block));
      read_ptr++;
      block->name = read_ptr;

      while (!(!is_group && *read_ptr == ')') && !(is_group && *read_ptr == ']')) {
         tl_assert2(*read_ptr && *read_ptr != '\n' && *read_ptr != '\r', "unterminated ( or [");
         read_ptr++;
      }
      tl_assert2(block->name != read_ptr, "node has no name");

      *read_ptr = '\0';
      if (!is_group)
         search_rule(block, block->name, read_ptr - block->name);
      read_ptr++;

      if (*read_ptr == '+') {
         tl_assert2(default_parent == NULL, "Only one default node is allowed");
         default_parent = block;
         read_ptr++;
      }

      while (*read_ptr == ' ')
         read_ptr++;
      tl_assert2(*read_ptr == '\n' || *read_ptr == '\r' || !*read_ptr, "Garbage at the end of the line");

      if (clo_fr_verb)
         VG_(printf)("%s '%s' %s\n", is_group ? "Group:" : "Group & Attach:", block->name, default_parent == block ? "(Default)" : "");

      if (depth >= 0) {
         if (indents[depth] != indent) {
            if (indent > indents[depth]) {
               tl_assert2(depth < 63, "Maximum allowed depth is 63 for the tree");
               depth++;
               indents[depth] = indent;
               if (parent)
                  parent = parent->first;
               else
                  parent = trace_head;
            } else {
               do {
                  tl_assert2(depth != 0, "Wrong tree indentation");
                  depth--;
                  tl_assert(parent);
                  parent = parent->parent;
               } while (indent != indents[depth]);
               tl_assert((depth == 0 && !parent) || (depth > 0 && parent));
            }
         }
      } else {
         // The indentation of the top element
         tl_assert(!parent);
         indents[0] = indent;
         depth = 0;
      }

      block->parent = parent;
      if (parent) {
         block->next = parent->first;
         parent->first = block;
      } else {
         block->next = trace_head;
         trace_head = block;
      }
      block->first = NULL;

      block->hash_next = NULL;

      block->allocs = 0;
      block->total = 0;
      block->current = 0;
      block->peak = 0;
      block->ips = 0;
   }

   remove_unused_rules();
}
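
Putting the parsing rules together: '#' starts a comment, indentation defines
the tree, '[name]' declares a group, '(name)' declares a group attached via a
rule of the same name, a trailing '+' marks the default parent, and any other
line is handed to parse_rule(). A hypothetical config this loop would accept
(all names invented):

# example config
all_allocs {MyApp} /usr/lib/libmyapp.so
[everything]+
  (all_allocs)
  [images]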
Example 14
static Char* parse_rule(Char* read_ptr, Rule_List** last_rule_ptr)
{
   Rule_List* rule;
   Rule_List* rule_ptr;

   rule = VG_(malloc)("freya.parse_rule.1", sizeof(Rule_List));

   rule->next = NULL;
   rule->parent = NULL;

   if (*read_ptr != '-') {
      rule->name = read_ptr;
      while (*read_ptr != ' ') {
         tl_assert2(*read_ptr && *read_ptr != '\n' && *read_ptr != '\r' && *read_ptr != ')', "Rule must start with hypen or a name followed by a space");
         read_ptr++;
      }
      rule->name_len = read_ptr - rule->name;
      *read_ptr = '\0';

      // Must have a unique name
      rule_ptr = rule_head;
      while (rule_ptr) {
         if (rule_ptr->name_len == rule->name_len && VG_(memcmp)(rule->name, rule_ptr->name, rule->name_len * sizeof(Char)) == 0) {
            VG_(printf)("Redefined rule %s. Rule names must be unique!\n", rule->name);
            tl_assert(0);
         }
         rule_ptr = rule_ptr->next;
      }
   } else {
      rule->name = NULL;
      rule->name_len = 0;
   }
   read_ptr++;

   // Enqueue this new rule
   if (*last_rule_ptr)
      (*last_rule_ptr)->next = rule;
   else
      rule_head = rule;
   *last_rule_ptr = rule;

   while (*read_ptr == ' ')
      read_ptr++;

   if (*read_ptr == '(' || *read_ptr == '{') {
      rule->is_namespace = *read_ptr == '{';
      read_ptr++;

      rule->func_name = read_ptr;
      while (!(!rule->is_namespace && *read_ptr == ')') && !(rule->is_namespace && *read_ptr == '}')) {
         tl_assert2(*read_ptr && *read_ptr != '\n' && *read_ptr != '\r', "unterminated ( or {");
         read_ptr++;
      }
      rule->func_name_len = read_ptr - rule->func_name;
      tl_assert2(rule->func_name_len > 0, "missing function or namespace name");
      *read_ptr = '\0';
      read_ptr++;

      while (*read_ptr == ' ')
         read_ptr++;
   } else {
      rule->func_name = NULL;
      rule->func_name_len = 0;
      rule->is_namespace = False;
   }

   rule->path = read_ptr;
   while (*read_ptr && *read_ptr != '\n' && *read_ptr != '\r')
      read_ptr++;
   rule->path_len = read_ptr - rule->path;
   if (rule->path_len == 0)
      rule->path = NULL;
   else if (*read_ptr) {
      *read_ptr = '\0';
      read_ptr++;
   }

   if (clo_fr_verb)
      VG_(printf)("Rule: '%s' (%ld) %s: '%s' (%ld) Path: '%s' (%ld)\n",
         rule->name, rule->name_len,
         rule->is_namespace ? "Namesp" : "Func", rule->func_name, rule->func_name_len,
         rule->path, rule->path_len);
   return read_ptr;
}
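
One rule line, annotated (the values are invented): the leading name must be
unique, '(...)' names a function while '{...}' names a namespace, and the
remainder of the line is a path:

# png_rule {png} /usr/lib/libpng.so
#   ^name   ^namespace ({...}) or function ((...))   ^path
#
# A leading '-' instead of a name creates an anonymous rule.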
Example 15
void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}
Example 16
// Takes a location and a VariableEntry and tries to determine
// the UPPER BOUND of the array which the pointer refers to.
// CAUTION: This function is still fairly primitive and untested
//
// This now uses a two-pass scheme which first searches to the end of the
// array and then goes backwards until it finds the first byte whose V-bit
// is valid so that it can avoid printing out tons of garbage values and
// cluttering up the .dtrace file.
//
// This also now has support to find statically-sized arrays within structs
// declared as global and local variables as well as statically-sized arrays
// which are themselves global and local variables
int returnArrayUpperBoundFromPtr(VariableEntry* var, Addr varLocation)
{
  VariableEntry* targetVar = 0;
  Addr baseAddr = 0;
  char foundGlobalArrayVariable = 0;

  FJALAR_DPRINTF("Checking for upper bound of %p\n", (void *)varLocation);

  // 1. Search if varLocation is within a global variable
  if (addressIsGlobal(varLocation)) {
    targetVar = returnArrayVariableWithAddr(&globalVars,
                                            varLocation,
                                            1, 0, &baseAddr);

    if (targetVar) {
      foundGlobalArrayVariable = 1;
    }
    else {
      // UNCONDITIONALLY RETURN 0 IF WE CANNOT FIND A GLOBAL ARRAY
      // VARIABLE.  WE DO NOT WANT TO PROBE IN THE GLOBAL SPACE
      // BECAUSE ALL OF IT MAY POSSIBLY BE INITIALIZED.

      //      targetVar = returnGlobalSingletonWithAddress(varLocation);
      //      if (targetVar) {
      return 0;
        //      }
    }
  }
  // 2. If not found, then search if varLocation is within the stack
  //    frame of a function currently on the stack
  if (!targetVar) {
    FunctionExecutionState* e;
    FJALAR_DPRINTF("Not found in globals area, checking on stack\n");

    e = returnFunctionExecutionStateWithAddress(varLocation);

    FJALAR_DPRINTF("Found function entry %p\n", e);

    if (e) {
      VarList* localArrayAndStructVars = &(e->func->localArrayAndStructVars);
      FJALAR_DPRINTF(" e->FP is %p\n", (void *)e->FP);
      FJALAR_DPRINTF(" localArrayAndSTructVars: %p, numVars: %d\n", localArrayAndStructVars, localArrayAndStructVars->numVars);

      tl_assert(!localArrayAndStructVars || (Addr)localArrayAndStructVars > 0x100);

      if (localArrayAndStructVars &&
          // hopefully ensures that it's not totally bogus
          ((Addr)localArrayAndStructVars > 0x100) &&
          (localArrayAndStructVars->numVars > 0)) {
        targetVar = returnArrayVariableWithAddr(localArrayAndStructVars,
                                                varLocation,
                                                0, e, &baseAddr);
      }
    }
  }

  // 3. If still not found, then search the heap for varLocation
  //    if it is lower than the current frame pointer
  // This is a last-ditch desperation attempt and won't yield valid-looking
  // results in cases like when you have a pointer to an int which is located
  // within a struct malloc'ed on the heap.
  if (!targetVar) {
    FJALAR_DPRINTF("Not found on stack, checking in heap\n");

    tl_assert(curFunctionExecutionStatePtr);

    FJALAR_DPRINTF("Checking if the variable is on the stack:\n");
    FJALAR_DPRINTF("\tCurrent Stackframe: [%p - %p]\n", (void*)curFunctionExecutionStatePtr->FP, (void*)curFunctionExecutionStatePtr->lowestSP);

    // Make sure the address is not in the stack or global region
    // before probing so that we don't accidentally make a mistake
    // where we erroneously conclude that the array size is HUGE
    // since all areas on the stack and global regions are ALLOCATED
    // so probing won't do us much good
    if ((varLocation < curFunctionExecutionStatePtr->lowestSP) &&
        !addressIsGlobal(varLocation)) {
      int size;
      FJALAR_DPRINTF("Location looks reasonable, probing at %p\n",
             (void *)varLocation);

      size =
        probeAheadDiscoverHeapArraySize(varLocation,
                                        getBytesBetweenElts(var));

      FJALAR_DPRINTF("Size is %d\n", size);

      // We want an upper-bound on the array, not the actual size
      if (size > 0)
        return (size - 1);
      else
        return 0;
    }
  }
  // This is a less strict match which only compares rep. types
  // ... we will do more checking later to really determine the relative sizes.
  // This leniency allows an int* to reference a char[] and so forth ...
  // see below for translation
  //  else if (baseAddr &&
  //           (targetVar->varType->repType == var->varType->repType)) {

  // (comment added 2005)  
  // TODO: Hmmmm, what are we gonna do without repTypes???  I need to
  // investigate this 'if' condition more carefully later:
  else if (baseAddr) {
    int targetVarSize = 0;
    int targetVarBytesBetweenElts = getBytesBetweenElts(targetVar);
    int varBytesBetweenElts = getBytesBetweenElts(var);
    Addr highestAddr;

    tl_assert(IS_STATIC_ARRAY_VAR(targetVar));
    FJALAR_DPRINTF("varLocation: %p\n", (void *)varLocation);

    highestAddr = baseAddr + ((targetVar->staticArr->upperBounds[0]) * targetVarBytesBetweenElts);

    FJALAR_DPRINTF("baseAddr is: %p, highestAddr is %p\n", (void *)baseAddr, (void *)highestAddr);

    // NEW!: Probe backwards until you find the first address whose V-bit is SET:
    // but ONLY do this for globals and NOT for stuff on the stack because
    // V-bits for stack variables are FLAKY!!!  During function exit, all the V-bits
    // are wiped out :(

    if (foundGlobalArrayVariable) {
      while ((highestAddr > varLocation) &&
              (MC_Ok != mc_check_readable(highestAddr, targetVarBytesBetweenElts, 0))) {
        highestAddr -= targetVarBytesBetweenElts;
      }
    }

    // This is IMPORTANT that we subtract from varLocation RATHER than baseAddr
    // because of the fact that varLocation can point to the MIDDLE of an array
    targetVarSize = (highestAddr - varLocation) / targetVarBytesBetweenElts;
    FJALAR_DPRINTF("targetVarBytesBetweenElts is %d, varBytesBetweenElts is %d, targetVarSize is %d\n",
                    targetVarBytesBetweenElts, varBytesBetweenElts, targetVarSize);


    FJALAR_DPRINTF("Target : [%s - %d] Source : [%s - %d]\n",
                    targetVar->varType->typeName, targetVarBytesBetweenElts,
                    var->varType->typeName, varBytesBetweenElts);
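
    // Worked example (values invented): a char[40] target has 1 byte
    // between elements; referenced through an int* (4 bytes between
    // elements), the reported upper bound is (40 * 1) / 4 = 10.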

    if (targetVarBytesBetweenElts == varBytesBetweenElts) {
      return targetVarSize;
    } else {
      return (targetVarSize * targetVarBytesBetweenElts) / varBytesBetweenElts;
    }

  }
  return 0;
}
Example 17
void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}
Example 18
/**
 * Look up the address of the struct barrier_info associated with the
 * client-side barrier object.
 */
static struct barrier_info* DRD_(barrier_get)(const Addr barrier)
{
   tl_assert(offsetof(DrdClientobj, barrier) == 0);
   return &(DRD_(clientobj_get)(barrier, ClientBarrier)->barrier);
}
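
The offsetof() assertion is what makes the member access legal: every member
of DrdClientobj sits at offset 0, so a pointer to the client object is also a
pointer to its barrier view. A sketch of the layout this relies on (member
names approximate; the real union lives in DRD's client-object code):

typedef union {
   struct any_info     any;      /* fields common to all client objects */
   struct mutex_info   mutex;
   struct barrier_info barrier;  /* offsetof(DrdClientobj, barrier) == 0 */
   /* ... one member per client object type ... */
} DrdClientobj;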
Example 19
void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
{
   MC_Mempool*  mp;
   MC_Chunk*    mc;
   ThreadId     tid = VG_(get_running_tid)();
   UInt         n_shadows, i;
   VgHashNode** chunks;

   if (VG_(clo_verbosity) > 2) {
      VG_(message)(Vg_UserMsg, "mempool_trim(0x%lx, 0x%lx, %ld)\n",
                               pool, addr, szB);
      VG_(get_and_pp_StackTrace) (tid, MEMPOOL_DEBUG_STACKTRACE_DEPTH);
   }

   mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
   if (mp == NULL) {
      MC_(record_illegal_mempool_error)(tid, pool);
      return;
   }

   check_mempool_sane(mp);
   chunks = VG_(HT_to_array) ( mp->chunks, &n_shadows );
   if (n_shadows == 0) {
     tl_assert(chunks == NULL);
     return;
   }

   tl_assert(chunks != NULL);
   for (i = 0; i < n_shadows; ++i) {

      Addr lo, hi, min, max;

      mc = (MC_Chunk*) chunks[i];

      lo = mc->data;
      hi = mc->szB == 0 ? mc->data : mc->data + mc->szB - 1;

#define EXTENT_CONTAINS(x) ((addr <= (x)) && ((x) < addr + szB))

      if (EXTENT_CONTAINS(lo) && EXTENT_CONTAINS(hi)) {

         /* The current chunk is entirely within the trim extent: keep
            it. */

         continue;

      } else if ( (! EXTENT_CONTAINS(lo)) &&
                  (! EXTENT_CONTAINS(hi)) ) {

         /* The current chunk is entirely outside the trim extent:
            delete it. */

         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }
         die_and_free_mem ( tid, mc, mp->rzB );  

      } else {

         /* The current chunk intersects the trim extent: remove,
            trim, and reinsert it. */

         tl_assert(EXTENT_CONTAINS(lo) ||
                   EXTENT_CONTAINS(hi));
         if (VG_(HT_remove)(mp->chunks, (UWord)mc->data) == NULL) {
            MC_(record_free_error)(tid, (Addr)mc->data);
            VG_(free)(chunks);
            if (MP_DETAILED_SANITY_CHECKS) check_mempool_sane(mp);
            return;
         }

         if (mc->data < addr) {
           min = mc->data;
           lo = addr;
         } else {
           min = addr;
           lo = mc->data;
         }

         /* The chunk's extent ends at mc->data + mc->szB; the trim size
            szB is unrelated to the chunk's own size. */
         if (mc->data + mc->szB > addr + szB) {
           max = mc->data + mc->szB;
           hi = addr + szB;
         } else {
           max = addr + szB;
           hi = mc->data + mc->szB;
         }

         tl_assert(min <= lo);
         tl_assert(lo < hi);
         tl_assert(hi <= max);

         if (min < lo && !EXTENT_CONTAINS(min)) {
           MC_(make_mem_noaccess)( min, lo - min);
         }

         if (hi < max && !EXTENT_CONTAINS(max)) {
           MC_(make_mem_noaccess)( hi, max - hi );
         }

         mc->data = lo;
         mc->szB = (UInt) (hi - lo);
         VG_(HT_add_node)( mp->chunks, mc );        
      }

#undef EXTENT_CONTAINS
      
   }
   check_mempool_sane(mp);
   VG_(free)(chunks);
}
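
The min/lo/hi/max computation in the intersecting case is interval
arithmetic: [lo, hi) is the intersection of the chunk with the trim extent
and [min, max) is their hull. A worked example with invented numbers:

/* chunk = [150, 250), trim extent [addr, addr+szB) = [120, 200)
   => min = 120, lo = 150, hi = 200, max = 250.
   The chunk tail [hi, max) = [200, 250) is made noaccess, and the chunk
   is reinserted trimmed to data = 150, szB = 50. */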
Example 20
/**
 * Initialize a barrier with given client address, barrier type and number of
 * participants. The 'reinitialization' argument indicates whether a barrier
 * object is being initialized or reinitialized.
 *
 * Called before pthread_barrier_init().
 */
void DRD_(barrier_init)(const Addr barrier,
                        const BarrierT barrier_type, const Word count,
                        const Bool reinitialization)
{
   struct barrier_info* p;

   tl_assert(barrier_type == pthread_barrier || barrier_type == gomp_barrier);

   if (count == 0)
   {
      BarrierErrInfo bei = { DRD_(thread_get_running_tid)(), barrier, 0, 0 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              BarrierErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "pthread_barrier_init: 'count' argument is zero",
                              &bei);
   }

   if (! reinitialization && barrier_type == pthread_barrier)
   {
      p = DRD_(barrier_get)(barrier);
      if (p)
      {
         BarrierErrInfo bei = { DRD_(thread_get_running_tid)(), barrier, 0, 0 };
         VG_(maybe_record_error)(VG_(get_running_tid)(),
                                 BarrierErr,
                                 VG_(get_IP)(VG_(get_running_tid)()),
                                 "Barrier reinitialization",
                                 &bei);
      }
   }

   p = DRD_(barrier_get_or_allocate)(barrier, barrier_type, count);

   if (s_trace_barrier) {
      if (reinitialization)
         DRD_(trace_msg)("[%d] barrier_reinit    %s 0x%lx count %ld -> %ld",
                         DRD_(thread_get_running_tid)(),
                         barrier_get_typename(p), barrier, p->count, count);
      else
         DRD_(trace_msg)("[%d] barrier_init      %s 0x%lx",
                         DRD_(thread_get_running_tid)(),
                         barrier_get_typename(p),
                         barrier);
   }

   if (reinitialization && p->count != count)
   {
      if (p->pre_waiters_left != p->count || p->post_waiters_left != p->count)
      {
         BarrierErrInfo bei = { DRD_(thread_get_running_tid)(), p->a1, 0, 0 };
         VG_(maybe_record_error)(VG_(get_running_tid)(),
                                 BarrierErr,
                                 VG_(get_IP)(VG_(get_running_tid)()),
                                 "Reinitialization of barrier with active"
                                 " waiters",
                                 &bei);
      }
      p->count = count;
   }
}
Example 21
static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
   UWord result = 0;
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();

   tl_assert(vg_tid == VG_(get_running_tid)());
   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid);

   switch (arg[0])
   {
   case VG_USERREQ__MALLOCLIKE_BLOCK:
      if (DRD_(g_free_is_write)) {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "--free-is-write=yes is incompatible with"
                                 " custom memory allocator client requests",
                                 &GEI);
      }
      if (arg[1])
         DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[2]/*size*/);
      break;

   case VG_USERREQ__RESIZEINPLACE_BLOCK:
      if (!DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
      {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "Invalid VG_USERREQ__RESIZEINPLACE_BLOCK request",
                                 &GEI);
      }
      DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[3]/*newSize*/);
      break;

   case VG_USERREQ__FREELIKE_BLOCK:
      if (arg[1] && ! DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
      {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "Invalid VG_USERREQ__FREELIKE_BLOCK request",
                                 &GEI);
      }
      break;

   case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
      result = vg_tid;
      break;

   case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
      result = drd_tid;
      break;

   case VG_USERREQ__DRD_SET_THREAD_NAME:
      DRD_(thread_set_name)(drd_tid, (const HChar*)arg[1]);
      break;

   case VG_USERREQ__DRD_START_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 39:
      DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
      break;

   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_TRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 40:
      DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE:
      DRD_(hb_happens_before)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER:
      DRD_(hb_happens_after)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_pre_init)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_post_destroy)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      tl_assert(arg[2] == !! arg[2]);
      if (arg[2])
      {
         DRD_(rwlock_pre_wrlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_wrlock)(arg[1], user_rwlock, True);
      }
      else
      {
         DRD_(rwlock_pre_rdlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_rdlock)(arg[1], user_rwlock, True);
      }
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      tl_assert(arg[2] == !! arg[2]);
      DRD_(rwlock_pre_unlock)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__SET_PTHREAD_COND_INITIALIZER:
      DRD_(pthread_cond_initializer) = (Addr)arg[1];
      DRD_(pthread_cond_initializer_size) = arg[2];
      break;

   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
      break;

   case VG_USERREQ__DRD_START_TRACE_ADDR:
      DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2], False);
      break;

   case VG_USERREQ__DRD_STOP_TRACE_ADDR:
      DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_RECORD_LOADS:
      DRD_(thread_set_record_loads)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_RECORD_STORES:
      DRD_(thread_set_record_stores)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__SET_PTHREADID:
      // pthread_self() returns 0 for programs not linked with libpthread.so.
      if (arg[1] != INVALID_POSIX_THREADID)
         DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__SET_JOINABLE:
   {
      const DrdThreadId drd_joinable = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (drd_joinable != DRD_INVALID_THREADID)
         DRD_(thread_set_joinable)(drd_joinable, (Bool)arg[2]);
      else {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_detach(): invalid thread ID",
                                 &ITI);
      }
      break;
   }

   case VG_USERREQ__ENTERING_PTHREAD_CREATE:
      DRD_(thread_entering_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__LEFT_PTHREAD_CREATE:
      DRD_(thread_left_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__POST_THREAD_JOIN:
   {
      const DrdThreadId thread_to_join = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_join == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_join(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_post_join)(drd_tid, thread_to_join);
      }
      break;
   }

   case VG_USERREQ__PRE_THREAD_CANCEL:
   {
      const DrdThreadId thread_to_cancel =DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_cancel == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_cancel(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_pre_cancel)(thread_to_cancel);
      }
      break;
   }

   case VG_USERREQ__POST_THREAD_CANCEL:
      break;

   case VG_USERREQ__PRE_MUTEX_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_init)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_MUTEX_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_MUTEX_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_MUTEX_LOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_MUTEX_LOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
      break;

   case VG_USERREQ__PRE_MUTEX_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_unlock)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(spinlock_init_or_unlock)(arg[1]);
      break;

   case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_init)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_COND_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(cond_post_destroy)(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_COND_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const MutexT mutex_type = arg[3];
         /* pthread_cond_wait() releases the mutex before blocking, so update
          * the mutex state before handling the wait itself. */
         DRD_(mutex_unlock)(mutex, mutex_type);
         DRD_(cond_pre_wait)(cond, mutex);
      }
      break;

   case VG_USERREQ__POST_COND_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const Bool took_lock = arg[3];
         DRD_(cond_post_wait)(cond);
         /* The mutex has been reacquired by the time pthread_cond_wait()
          * returns. */
         DRD_(mutex_post_lock)(mutex, took_lock, True/*post_cond_wait*/);
      }
      break;

   case VG_USERREQ__PRE_COND_SIGNAL:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_signal)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_SIGNAL:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_BROADCAST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_broadcast)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_BROADCAST:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_SEM_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_SEM_OPEN:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_OPEN:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_open)(arg[1], (HChar*)arg[2], arg[3], arg[4], arg[5]);
      break;

   case VG_USERREQ__PRE_SEM_CLOSE:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_close)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_CLOSE:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_wait)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_SEM_POST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_post)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__POST_SEM_POST:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_BARRIER_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
      break;

   case VG_USERREQ__POST_BARRIER_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_BARRIER_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_BARRIER_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_destroy)(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_BARRIER_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_BARRIER_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
      break;

   case VG_USERREQ__PRE_RWLOCK_INIT:
      DRD_(rwlock_pre_init)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_DESTROY:
      DRD_(rwlock_post_destroy)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_rdlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_RDLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_rdlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_wrlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_WRLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_wrlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_unlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__DRD_CLEAN_MEMORY:
      if (arg[2] > 0)
         DRD_(clean_memory)(arg[1], arg[2]);
      break;

   case VG_USERREQ__HELGRIND_ANNOTATION_UNIMP:
      {
         /* Note: it is assumed below that the text arg[1] points to is never
          * freed, e.g. because it points to static data.
          */
         UnimpClReqInfo UICR =
            { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 UnimpHgClReq,
                                 VG_(get_IP)(vg_tid),
                                 "",
                                 &UICR);
      }
      break;

   case VG_USERREQ__DRD_ANNOTATION_UNIMP:
      {
         /* Note: it is assumed below that the text arg[1] points to is never
          * freed, e.g. because it points to static data.
          */
         UnimpClReqInfo UICR =
            { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 UnimpDrdClReq,
                                 VG_(get_IP)(vg_tid),
                                 "",
                                 &UICR);
      }
      break;

   default:
#if 0
      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
#endif
      return False;
   }

   *ret = result;
   return True;
}
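
For context, here is a minimal sketch of how a client-side wrapper drives the PRE/POST pairs handled above. VALGRIND_DO_CLIENT_REQUEST_STMT comes from valgrind.h; the VG_USERREQ__* request codes and mutex_type_unknown are assumed to be visible via DRD's client-request header, and my_mutex_lock is a hypothetical name, not DRD's actual interposer.

#include <pthread.h>
#include <valgrind/valgrind.h>
#include "drd_clientreq.h"   /* VG_USERREQ__*_MUTEX_LOCK, mutex_type_unknown */

/* Hypothetical pthread_mutex_lock() interposer: notify DRD before and after
 * the real call, matching the PRE_MUTEX_LOCK / POST_MUTEX_LOCK cases above. */
static int my_mutex_lock(pthread_mutex_t* mutex)
{
   int ret;

   /* arg[1] = mutex address, arg[2] = mutex type, arg[3] = trylock flag. */
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_MUTEX_LOCK,
                                   mutex, mutex_type_unknown, 0, 0, 0);
   ret = pthread_mutex_lock(mutex);
   /* arg[1] = mutex address, arg[2] = took_lock flag. */
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_MUTEX_LOCK,
                                   mutex, ret == 0, 0, 0, 0);
   return ret;
}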
Ejemplo n.º 22
/**
 * Deallocate the memory that is owned by members of
 * struct barrier_thread_info.
 */
static void DRD_(barrier_thread_destroy)(struct barrier_thread_info* const p)
{
   tl_assert(p);
   DRD_(sg_put)(p->sg);
   DRD_(sg_put)(p->post_wait_sg);
}
Ejemplo n.º 23
struct mutex_info* DRD_(mutex_get)(const Addr mutex)
{
   /* The mutex member must be the first member of the DrdClientobj union
    * such that a pointer to the union can also serve as a pointer to the
    * mutex member. */
   tl_assert(offsetof(DrdClientobj, mutex) == 0);
   return &(DRD_(clientobj_get)(mutex, ClientMutex)->mutex);
}
Ejemplo n.º 24
static VG_REGPARM(0) void helper_instrument_WrTmp_GetI(UInt base, UInt ix, UInt bias, UInt nElems)
{
    /* VEX GetI semantics: access the guest state at a circularly computed
     * offset (used e.g. for the rotating x87 register stack). */
    UInt index = base+((ix+bias)%nElems);

    tl_assert(get_reg_from_offset(index) == guest_INVALID);
}
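
A quick worked example of the circular indexing above; base, bias and nElems are illustrative values (nElems == 8 matches e.g. the rotating x87 register stack):

#include <stdio.h>

int main(void)
{
    const unsigned base = 64, bias = 0, nElems = 8;   /* assumed values */
    unsigned ix;

    for (ix = 6; ix < 10; ix++)
        printf("ix=%u -> index=%u\n", ix, base + (ix + bias) % nElems);
    /* Prints 70, 71, 64, 65: the index wraps around at nElems. */
    return 0;
}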
Ejemplo n.º 25
const HChar* DRD_(mutex_get_typename)(struct mutex_info* const p)
{
   tl_assert(p);

   return DRD_(mutex_type_name)(p->mutex_type);
}
Ejemplo n.º 26
/** Initialize the client object set. */
void clientobj_init(void)
{
  tl_assert(s_clientobj == 0);
  s_clientobj = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
  tl_assert(s_clientobj);
}
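
Because the set is created with key offset 0 and a NULL compare function, lookups compare the first word of each node, i.e. the object's client start address. Below is a minimal sketch of inserting a node, using the same VG_(OSetGen_*) calls as DRD_(barrier_post_wait)() further down; example_clientobj_add is a hypothetical helper, and the any.a1/any.type field layout of DrdClientobj is assumed.

/* Hypothetical helper, for illustration only: DRD's real clientobj_add()
 * performs additional bookkeeping (cleanup callbacks, range registration). */
static DrdClientobj* example_clientobj_add(const Addr a1, const ObjType t)
{
   DrdClientobj* p = VG_(OSetGen_AllocNode)(s_clientobj, sizeof(*p));

   VG_(memset)(p, 0, sizeof(*p));
   p->any.a1   = a1;   /* First word doubles as the OSet key (keyOff == 0). */
   p->any.type = t;    /* Assumed field name. */
   VG_(OSetGen_Insert)(s_clientobj, p);
   return p;
}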
Ejemplo n.º 27
int DRD_(mutex_get_recursion_count)(const Addr mutex)
{
   struct mutex_info* const p = DRD_(mutex_get)(mutex);
   tl_assert(p);
   return p->recursion_count;
}
Ejemplo n.º 28
static void dinfo_free(void* p) {
   tl_assert(p);
   VG_(arena_free)(VG_AR_DINFO, p);
}
Ejemplo n.º 29
SizeT thread_get_stack_size(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_size;
}
Ejemplo n.º 30
/** Called after pthread_barrier_wait() / gomp_barrier_wait(). */
void DRD_(barrier_post_wait)(const DrdThreadId tid, const Addr barrier,
                             const BarrierT barrier_type, const Bool waited,
                             const Bool serializing)
{
   struct barrier_info* p;
   const UWord word_tid = tid;
   struct barrier_thread_info* q;
   struct barrier_thread_info* r;

   p = DRD_(barrier_get)(barrier);

   if (s_trace_barrier)
   {
      VG_(message)(Vg_UserMsg,
                   "[%d/%d] barrier_post_wait %s 0x%lx iteration %ld%s",
                   VG_(get_running_tid)(),
                   tid,
                   p ? barrier_get_typename(p) : "(?)",
                   barrier,
                   p ? p->post_iteration : -1,
                   serializing ? " (serializing)" : "");
   }

   /*
    * If p == 0, this means that the barrier has been destroyed after
    * *_barrier_wait() returned and before this function was called. Just
    * return in that case -- race conditions between *_barrier_wait()
    * and *_barrier_destroy() are detected by the *_barrier_destroy() wrapper.
    */
   if (p == 0)
      return;

   /* If the *_barrier_wait() call returned an error code, exit. */
   if (! waited)
      return;

   q = VG_(OSetGen_Lookup)(p->oset, &word_tid);
   if (q == 0)
   {
      BarrierErrInfo bei = { p->a1, 0, 0 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              BarrierErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Error in barrier implementation"
                              " -- barrier_wait() started before"
                              " barrier_destroy() and finished after"
                              " barrier_destroy()",
                              &bei);

      q = VG_(OSetGen_AllocNode)(p->oset, sizeof(*q));
      DRD_(barrier_thread_initialize)(q, tid, p->pre_iteration);
      VG_(OSetGen_Insert)(p->oset, q);
      tl_assert(VG_(OSetGen_Lookup)(p->oset, &word_tid) == q);
   }
   /*
    * Combine all vector clocks that were stored in the pre_barrier_wait
    * wrapper with the vector clock of the current thread.
    */
   VG_(OSetGen_ResetIter)(p->oset);
   for ( ; (r = VG_(OSetGen_Next)(p->oset)) != 0; )
   {
      if (r != q)
      {
         tl_assert(r->sg[p->post_iteration]);
         DRD_(thread_combine_vc2)(tid, &r->sg[p->post_iteration]->vc);
      }
   }

   /* Create a new segment and store a pointer to that segment. */
   DRD_(thread_new_segment)(tid);
   DRD_(thread_get_latest_segment)(&q->post_wait_sg, tid);
   s_barrier_segment_creation_count++;

   /*
    * Once as many threads as the barrier count have called this post-wait
    * wrapper, toggle p->post_iteration and reset the p->post_waiters_left
    * counter.
    */
   if (--p->post_waiters_left <= 0)
   {
      p->post_iteration    = 1 - p->post_iteration;
      p->post_waiters_left = p->count;
   }
}
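
To make the waited and serializing parameters concrete, here is a sketch of the wrapper-side ordering this function expects. The request plumbing mirrors the client-request handler earlier in this listing; my_barrier_wait is a hypothetical name, and pthread_barrier is assumed to be DRD's BarrierT value for POSIX barriers.

#include <pthread.h>
#include <valgrind/valgrind.h>

/* Hypothetical pthread_barrier_wait() interposer. */
static int my_barrier_wait(pthread_barrier_t* b)
{
   int ret;

   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PRE_BARRIER_WAIT,
                                   b, pthread_barrier, 0, 0, 0);
   ret = pthread_barrier_wait(b);
   /* waited: the wait completed; serializing: this thread is the one the
    * barrier elected (PTHREAD_BARRIER_SERIAL_THREAD). */
   VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__POST_BARRIER_WAIT,
                                   b, pthread_barrier,
                                   ret == 0 || ret == PTHREAD_BARRIER_SERIAL_THREAD,
                                   ret == PTHREAD_BARRIER_SERIAL_THREAD, 0);
   return ret;
}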