Example 1
int dmap_set(DMAP *dmap, u32_t no, double key)
{
    int ret = -1;

    if(dmap)
    {
       RWLOCK_WRLOCK(dmap->rwlock);
       dmap_vset(dmap, no, key);
       if(dmap->vmap[no].off < 0)
       {
           dmap_insert(dmap, no, key);
       }
       else
       {
           if(key != dmap->vmap[no].val)
           {
                dmap_remove(dmap, no);
                dmap_insert(dmap, no, key);
           }
       }
       ret = 0;
       dmap->vmap[no].val = key;
       RWLOCK_UNLOCK(dmap->rwlock);
    }
    return ret;
}
Example 2
int lkv_set(LKV *lkv, u32_t no, int64_t key)
{
    int ret = -1;

    if(lkv)
    {
       RWLOCK_WRLOCK(lkv->rwlock);
       lkv_vset(lkv, no, key);
#ifdef __LKV_USE_IDX__
       if(lkv->vmap[no].off < 0)
       {
           lkv_insert(lkv, no, key);
       }
       else
       {
           if(key != lkv->vmap[no].val)
           {
                lkv_remove(lkv, no);
                lkv_insert(lkv, no, key);
           }
       }
#endif
       lkv->vmap[no].val = key;
       ret = 0;
       RWLOCK_UNLOCK(lkv->rwlock);
    }
    return ret;
}
Example 3
int imap_set(IMAP *imap, u32_t no, int32_t key)
{
    int ret = -1;

    if(imap)
    {
       RWLOCK_WRLOCK(imap->rwlock);
       imap_vset(imap, no, key);
       if(imap->vmap[no].off < 0)
       {
           imap_insert(imap, no, key);
       }
       else
       {
           if(key != imap->vmap[no].val)
           {
                imap_remove(imap, no);
                imap_insert(imap, no, key);
           }
       }
       ret = 0;
       imap->vmap[no].val = key;
       RWLOCK_UNLOCK(imap->rwlock);
    }
    return ret;
}
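
Examples 1-3 are the same routine specialized for double, int64_t, and int32_t values: NULL-check the handle, take the write lock, re-index the slot only when it is new or its stored value actually changed, record the value, release the lock. A minimal self-contained sketch of that shape, assuming POSIX rwlocks behind the RWLOCK_* macros (all names below are illustrative, not from the original sources, and the *_vset call, whose definition is not shown in these listings, is omitted):

#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-ins for the projects' slot table and index helpers. */
typedef struct { long off; double val; } slot_t;
typedef struct {
    pthread_rwlock_t rwlock;
    slot_t *vmap;
    size_t vsize;        /* mapped size of vmap, in bytes */
} map_t;

static void map_insert(map_t *m, unsigned no, double key)
{
    m->vmap[no].off = (long)no;          /* stub: record an index entry */
    (void)key;
}

static void map_remove(map_t *m, unsigned no)
{
    m->vmap[no].off = -1;                /* stub: drop the index entry */
}

int map_set(map_t *m, unsigned no, double key)
{
    int ret = -1;

    if (m) {
        pthread_rwlock_wrlock(&m->rwlock);
        if (m->vmap[no].off < 0) {
            map_insert(m, no, key);      /* slot never indexed: insert */
        } else if (key != m->vmap[no].val) {
            map_remove(m, no);           /* value changed: re-key the index */
            map_insert(m, no, key);
        }
        m->vmap[no].val = key;
        ret = 0;
        pthread_rwlock_unlock(&m->rwlock);
    }
    return ret;
}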
Example 4
int lkv_del(LKV *lkv, u32_t no)
{
    int ret = -1, n = 0;

    if(lkv)
    {
        RWLOCK_WRLOCK(lkv->rwlock);
        if((n = (lkv->vsize/sizeof(LVVV))) > 0 && no < n)
        {
            lkv_remove(lkv, no);
            lkv->vmap[no].off = -1;
            ret = 0;
        }
        RWLOCK_UNLOCK(lkv->rwlock);
    }
    return ret;
}
Example 5
int dmap_del(DMAP *dmap, u32_t no)
{
    int ret = -1, n = 0;

    if(dmap)
    {
        RWLOCK_WRLOCK(dmap->rwlock);
        if((n = (dmap->vsize/sizeof(DMMV))) > 0 && no < n)
        {
            dmap_remove(dmap, no);
            dmap->vmap[no].off = -1;
            ret = 0;
        }
        RWLOCK_UNLOCK(dmap->rwlock);
    }
    return ret;
}
Example 6
int imap_del(IMAP *imap, u32_t no)
{
    int ret = -1, n = 0;

    if(imap)
    {
        RWLOCK_WRLOCK(imap->rwlock);
        if((n = (imap->vsize/sizeof(IMMV))) > 0 && no < n)
        {
            imap_remove(imap, no);
            imap->vmap[no].off = -1;
            ret = 0;
        }
        RWLOCK_UNLOCK(imap->rwlock);
    }
    return ret;
}
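
Examples 4-6 are the matching delete: the slot count is recomputed from the mapped byte size, and only an in-range slot is removed from the index and marked free. The same shape, reusing the illustrative map_t, slot_t, and map_remove from the sketch after Example 3:

int map_del(map_t *m, unsigned no)
{
    int ret = -1;
    size_t n;

    if (m) {
        pthread_rwlock_wrlock(&m->rwlock);
        n = m->vsize / sizeof(slot_t);   /* number of slots actually mapped */
        if (n > 0 && no < n) {
            map_remove(m, no);
            m->vmap[no].off = -1;        /* mark the slot unused */
            ret = 0;
        }
        pthread_rwlock_unlock(&m->rwlock);
    }
    return ret;
}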
Example 7
static int
monitor_pthread_wrlock (p_lock_t lock)
{
  return RWLOCK_WRLOCK (&(lock->mutex));
}
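
Example 7 shows that RWLOCK_WRLOCK is expected to return an int, which matches the 0-on-success convention of pthread_rwlock_wrlock. The macro itself is project-specific: Examples 1-6 pass the lock handle directly, while Example 7 passes a pointer to an embedded field. On POSIX systems a plausible definition, offered as an assumption rather than the macro from any of these projects, is:

#include <pthread.h>

/* Assumed POSIX-backed definitions; each project ships its own variant,
 * and the argument convention (handle vs. pointer) differs between them. */
#define RWLOCK_RDLOCK(l) pthread_rwlock_rdlock(l)
#define RWLOCK_WRLOCK(l) pthread_rwlock_wrlock(l)
#define RWLOCK_UNLOCK(l) pthread_rwlock_unlock(l)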
Example 8
void Trace::process() {
  MUTEX_LOCK(backing_mutex_);
  EntryNumber entry_count = *((EntryNumber*)backing_);  // don't let this change under me
  MUTEX_UNLOCK(backing_mutex_);

  if (entries_done_ >= entry_count) return;       // handle the > case better

  remap_backing(sizeof(struct change)*entry_count); // what if this fails?

  printf("on %u going from %u to %u...", trace_index_, entries_done_, entry_count);
  fflush(stdout);

#ifndef _WIN32
  struct timeval tv_start, tv_end;
  gettimeofday(&tv_start, NULL);
#endif

  // clamping
  if ((entries_done_ + 1000000) < entry_count) {
    entry_count = entries_done_ + 1000000;
  }

  while (entries_done_ != entry_count) {
    // no need to lock this here, because this is the only thread that changes it
    const struct change *c = &backing_[entries_done_];
    char type = get_type_from_flags(c->flags);

    RWLOCK_WRLOCK(db_lock_);
    // clnum_to_entry_number_, instruction_pages_
    if (type == 'I') {
      if (clnum_to_entry_number_.size() < c->clnum) {
        // there really shouldn't be holes
        clnum_to_entry_number_.resize(c->clnum);
      }
      clnum_to_entry_number_.push_back(entries_done_);
      pages_[c->address & PAGE_MASK] |= PAGE_INSTRUCTION;
    }

    // addresstype_to_clnums_
    // ** this is 75% of the perf, real unordered_map should improve, but c++11 is hard to build
    pair<unordered_map<pair<Address, char>, set<Clnum> >::iterator, bool> ret =
      addresstype_to_clnums_.insert(MP(MP(c->address, type), set<Clnum>()));
    ret.first->second.insert(c->clnum);

    // registers_
    if (type == 'W' && (c->address < (unsigned int)(register_size_ * register_count_))) {
      registers_[c->address / register_size_][c->clnum] = c->data;
    }

    // memory_, data_pages_
    if (type == 'L' || type == 'S') {
      if (type == 'L') {
        pages_[c->address & PAGE_MASK] |= PAGE_READ;
      }
      if (type == 'S') {
        pages_[c->address & PAGE_MASK] |= PAGE_WRITE;
      }

      // no harm in doing the memory commit every time there's a load, right?
      int byte_count = (c->flags&SIZE_MASK)/8;
      uint64_t data = c->data;
      if (is_big_endian_) {
        for (int i = byte_count-1; i >= 0; --i) {
          commit_memory(c->clnum, c->address+i, data&0xFF);
          data >>= 8;
        }
      } else {
        // little endian
        for (int i = 0; i < byte_count; i++) {
          commit_memory(c->clnum, c->address+i, data&0xFF);
          data >>= 8;
        }
      }
    }
    RWLOCK_WRUNLOCK(db_lock_);

    // max_clnum_
    if (max_clnum_ < c->clnum && c->clnum != INVALID_CLNUM) {
      max_clnum_ = c->clnum;
    }

    if (min_clnum_ == INVALID_CLNUM || c->clnum < min_clnum_) {
      min_clnum_ = c->clnum;
    }
    
    entries_done_++;
  }
}
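
Example 8 takes and releases db_lock_ once per loop iteration rather than around the whole batch, so reader threads querying the indexed structures can interleave between entries (note that max_clnum_ and min_clnum_ are updated outside the lock). A hypothetical reader under the same discipline, sketched with POSIX rwlocks since this listing only shows the writer side:

#include <pthread.h>
#include <stdint.h>

static pthread_rwlock_t db_lock = PTHREAD_RWLOCK_INITIALIZER;
static uint64_t indexed_state;   /* stands in for the structures the lock guards */

/* Queries take the lock in read mode: they run concurrently with each
 * other, but never overlap one of the writer's per-entry critical sections. */
uint64_t query_indexed_state(void)
{
    uint64_t v;
    pthread_rwlock_rdlock(&db_lock);
    v = indexed_state;
    pthread_rwlock_unlock(&db_lock);
    return v;
}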