void HashExtensible::save()
{
     // Rewrite the whole file: a "Bucket_Capacity" header followed by the
     // serialized key/value records of every bucket reachable from the directory.
     std::ofstream file(this->filename.c_str(), std::ios::trunc);

     std::pair<int, int> auxPair;
     Bucket auxBucket;
     std::pair<Key_Node, Refs> auxKey;

     file << "Bucket_Capacity" << std::endl;
     file << this->bucketCapacity << std::endl;

     for (size_t i = 0; i < directory.size(); i++)
     {
          auxPair = directory.at(i);
          auxBucket = buckets.getBucket(auxPair.second);

          for (size_t j = 0; j < auxBucket.size(); j++)
          {
               auxKey = auxBucket.at(j);
               auxKey.first.Serialize(&file);
               file << "Value" << std::endl;
               file << auxKey.second.Serialize() << std::endl;
               // file << auxKey.second.posReg << std::endl;
               file << "Value_End" << std::endl;
          }
     }
     file.close();
}
Example #2
ZendArray::Bucket *ZendArray::findForInsert(int64 h) const {
  Bucket *p = m_arBuckets[h & m_nTableMask];
  if (UNLIKELY(!p)) return NULL;
  if (LIKELY(!p->hasStrKey() && p->ikey == h)) {
    return p;
  }
  p = p->pNext;
  if (UNLIKELY(!p)) return NULL;
  if (LIKELY(!p->hasStrKey() && p->ikey == h)) {
    return p;
  }
  p = p->pNext;
  int n = 2;
  while (p) {
    if (!p->hasStrKey() && p->ikey == h) {
      return p;
    }
    p = p->pNext;
    n++;
  }
  if (UNLIKELY(n > RuntimeOption::MaxArrayChain)) {
    raise_error("Array is too unbalanced (%d)", n);
  }
  return NULL;
}
Example #3
// Refill the long timer wheel by taking all timers from the heap that are due
// to pop in < 1hr.
void TimerStore::refill_long_wheel()
{
  if (!_extra_heap.empty())
  {
    std::pop_heap(_extra_heap.begin(), _extra_heap.end());
    Timer* timer = _extra_heap.back();

    while ((timer != NULL) &&
           (timer->next_pop_time() < _tick_timestamp + LONG_WHEEL_PERIOD_MS))
    {
      // Remove timer from heap
      _extra_heap.pop_back();
      Bucket* bucket = long_wheel_bucket(timer);
      bucket->insert(timer);

      if (!_extra_heap.empty())
      {
        std::pop_heap(_extra_heap.begin(), _extra_heap.end());
        timer = _extra_heap.back();
      }
      else
      {
        timer = NULL;
      }
    }

    // Push the timer back into the heap.
    if (!_extra_heap.empty())
    {
      std::push_heap(_extra_heap.begin(), _extra_heap.end());
    }
  }
}
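The pop_heap / pop_back / push_heap sequence above is the standard way to peel entries off a std::make_heap-managed vector while a condition holds. Below is a minimal self-contained sketch of the same drain-while-due idiom, using plain integers as stand-in pop times and std::greater so the soonest time sits at the top (the comparator used by the real Timer heap is not shown in this example, so that part is an assumption):

#include <algorithm>
#include <functional>
#include <iostream>
#include <vector>

int main() {
  // Stand-in pop times; keep them as a min-heap so the soonest time is on top.
  std::vector<int> heap = {500, 120, 900, 300, 50};
  std::make_heap(heap.begin(), heap.end(), std::greater<int>());

  const int horizon = 400;  // "due within the long wheel period"

  // Same shape as refill_long_wheel(): look at the top of the heap and, while
  // it is due, move it out of the heap (pop_heap puts it at the back, then
  // pop_back removes it) so the next candidate surfaces.
  while (!heap.empty() && heap.front() < horizon) {
    std::pop_heap(heap.begin(), heap.end(), std::greater<int>());
    int due = heap.back();
    heap.pop_back();
    std::cout << "moved " << due << " into a wheel bucket\n";
  }
  return 0;
}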
Example #4
bool ZendArray::isVectorData() const {
  int64 index = 0;
  for (Bucket *p = m_pListHead; p; p = p->pListNext) {
    if (p->hasStrKey() || p->ikey != index++) return false;
  }
  return true;
}
Example #5
HOT_FUNC_HPHP
bool ZendArray::addLvalImpl(int64 h, Variant **pDest,
                            bool doFind /* = true */) {
  ASSERT(pDest != NULL);
  Bucket *p;
  if (doFind) {
    p = findForInsert(h);
    if (p) {
      *pDest = &p->data;
      return false;
    }
  }
  p = NEW(Bucket)();
  p->setIntKey(h);
  if (pDest) {
    *pDest = &p->data;
  }
  uint nIndex = (h & m_nTableMask);
  CONNECT_TO_BUCKET_LIST(p, m_arBuckets[nIndex]);
  SET_ARRAY_BUCKET_HEAD(m_arBuckets, nIndex, p);
  CONNECT_TO_GLOBAL_DLLIST(p);
  if (h >= m_nNextFreeElement && m_nNextFreeElement >= 0) {
    m_nNextFreeElement = h + 1;
  }
  if (++m_size > tableSize()) {
    resize();
  }
  return true;
}
Example #6
File: main.cpp  Project: alexunder/X-toys
 void Run(int threadIndex) {
     assert(threadIndex < MAX_CORES);
     Bucket *bucket = buckets[threadIndex];
     bucket->Start(x0, y0, x1, y1);
     rast->Rasterize(grids, nIntervals, bucket);
     bucket->Resolve(image, xRes, yRes);
 }
Example #7
void numberOfCCsMaxCC(Graph* g, int* numberOfCCs, int* maxCC) {
	List* intList = g->getAllIDs();
	int numberOfCC = 0;
	int numberOfNodes = 0;
	int maxNumCC = 0;
	LinearHashTable *hashTable = g->getHashTable();
	BucketList **bucketListTable = hashTable->getTable();
	for (int i = 0; i < hashTable->getCurrentBucketNo(); i++) {
		BucketList *bucketList = bucketListTable[i];
		BucketListItem *cur = (BucketListItem*) bucketList->getHead();
		while (cur != NULL) {
			Bucket *bucket = cur->getElement();
			for (int j = 0; j < bucket->getSize(); j++) {
				BucketItem *bi = bucket->getBucketItemByIndex(j);
				int startNodeId = bi->getNodeID();

				numberOfNodes = 0;
				bool skip = true;

				IntegerListItem* listNode = (IntegerListItem*) intList->getHead();
				while (listNode != NULL) {
					if (listNode->getInt() == startNodeId) {
						intList->deleteItem(listNode);
						numberOfCC++;
						numberOfNodes++;
						skip = false;
						break;
					}
					listNode = (IntegerListItem*) listNode->getNext();
				}

				if (!skip) {
					//reachNodeN from each node of the graph
					ResultSet *res = reachNodeN(startNodeId, g);
					Pair* pair;
					while ((pair = res->next()) != NULL) {
						IntegerListItem* searchListNode = (IntegerListItem*) intList->getHead();
						while (searchListNode != NULL) {
							if (searchListNode->getInt() == pair->getNodeId()) {
								intList->deleteItem(searchListNode);
								numberOfNodes++;
								break;
							}
							searchListNode = (IntegerListItem*) searchListNode->getNext();
						}

					}
					//delete res;		//free(): invalid next size (fast):
				}
				if (numberOfNodes > maxNumCC)
					maxNumCC = numberOfNodes;
			}
			cur = (BucketListItem*) cur->getNext();
		}
	}
	delete intList;

	*numberOfCCs = numberOfCC;
	*maxCC = maxNumCC;
}
Example #8
bool Stasher::get(const void *key, uint32_t klen, buffer &value)
{
    uint32_t hash32;
    Bucket bucket;

    if (!klen)
        return false;

    hash32 = m_hashfunc(key, klen);
    m_array->get(bucket, address(hash32));

    BucketIter iter = bucket.iter();
    buffer keytest;
    while (iter.next())
    {
        if (hash32 == do_hash(iter))
        {
            iter.get_key(keytest);
            if (keytest.size() == klen && !memcmp(&keytest[0], key, klen))
            {
                iter.get_value(value);
                return true;
            }
            else
                keytest.clear();
        }
    }

    return false;
}
Example #9
void TopologicalMetaData::throw_required( const Bucket & bucket ,
                                          const char * required_by )
{
  static const char method[] = "stk::mesh::get_cell_topology" ;

  std::ostringstream msg ;

  msg << required_by << " Failed to obtain cell topology from "
      << method << "( Bucket[" ;

  const BulkData   & bulk_data = bucket.mesh();
  const MetaData   & meta_data = bulk_data.mesh_meta_data();
  const PartVector & all_parts = meta_data.get_parts();

  const std::pair< const unsigned * , const unsigned * >
    supersets = bucket.superset_part_ordinals();

  for ( const unsigned * j = supersets.first ; j != supersets.second ; ++j ) {
    msg << " " << all_parts[*j]->name();
  }

  msg << " ] )" ;

  throw std::runtime_error( msg.str() );
}
Example #10
 void StatFile::flushOneExpBucket()
 {
     //  flush the bucket being shifted out, but nothing else
     Bucket &eb0(expBucket[0]);
     Log(LL_Debug, "istat") << "StatFile::flushOneExpBucket()" << eb0.time();
     if (eb0.time() > 0)
     {
         int64_t ix = mapTimeToBucketIndex(eb0.time());
         int64_t oix = mapTimeToBucketIndex(eb0.time() - fileHeader_->season);
         Bucket *wb = writableBucket(ix);
         Bucket const &o = bucket(oix);
         if (o.time() > 0)
         {
             *wb = o;
             wb->expUpdate(eb0, fileHeader_->lambda);
         }
         else
         {
             *wb = eb0;
         }
     }
     //  must move one over
     memmove(expBucket, &expBucket[1], sizeof(expBucket)-sizeof(expBucket[0]));
     expBucket[sizeof(expBucket)/sizeof(expBucket[0])-1] = Bucket(true);
 }
Example #11
bool Partition::add(Entity entity)
{
  TraceIf("stk::mesh::impl::Partition::add", LOG_PARTITION);
  DiagIf(LOG_PARTITION, "Adding entity: " << print_entity_key(MetaData::get(BulkData::get(*m_repository)), m_mesh.entity_key(entity)));
  TraceIfWatchingDec("stk::mesh::impl::Partition::add", LOG_ENTITY, m_mesh.entity_key(entity), extra);

  if (m_mesh.bucket_ptr(entity))
  {
    // If an entity already belongs to a partition, it cannot be added to one.
    return false;
  }

  // If the last bucket is full, automatically create a new one.
  Bucket *bucket = get_bucket_for_adds();

  bucket->add_entity(entity);
  bucket->mesh().modified(entity);
  ++m_size;

  m_updated_since_compress = m_updated_since_sort = true;
  m_mesh.set_synchronized_count(entity, m_mesh.synchronized_count());

  // TODO - Too much tracing in this file, REMOVE
  DiagIfWatching(LOG_ENTITY, m_mesh.entity_key(entity),
                 " Bucket: " << *bucket << ", ordinal: " << m_mesh.bucket_ordinal(entity));
  DiagIf(LOG_PARTITION, "After add, state is: " << *this);

  internal_check_invariants();

  return true;
}
Example #12
HOT_FUNC
void ImmutableMap::add(int pos, CVarRef key, CVarRef val, bool unserializeObj) {
  int64_t ikey;
  StringData* skey;
  int32_t hash;
  Bucket* b = buckets() + pos;

  switch (key.getType()) {
    case KindOfInt64: {
      hash = ikey = key.toInt64();
      b->setIntKey(ikey);
      break;
    }
    case KindOfString: {
      skey = StringData::GetStaticString(key.getStringData());
      goto static_case;
    }
    case KindOfStaticString: {
      skey = key.getStringData();
static_case:
      hash = skey->hash();
      b->setStrKey(skey, hash);
      break;
    }
    default: not_reached();
  }
  addVal(pos, hash & m.m_capacity_mask, val, unserializeObj);
}
Example #13
bool field_data_valid( const FieldBase & f ,
                       const Bucket & k ,
                       unsigned ord ,
                       const char * required_by )
{
  const MetaData * const k_mesh_meta_data = & MetaData::get(k);
  const MetaData * const f_mesh_meta_data = & MetaData::get(f);
  const bool ok_mesh_meta_data  = k_mesh_meta_data == f_mesh_meta_data ;
  const bool ok_ord     = ord < k.size() ;
  const bool exists     = ok_mesh_meta_data && ok_ord &&
                          NULL != field_data( f , k.begin() );

  if ( required_by && ! exists ) {
    std::ostringstream msg_begin ;
    msg_begin << "For args: " ;
    msg_begin << f << " , " ;
    msg_begin << k << " , " ;
    msg_begin << ord << " , " ;
    msg_begin << required_by ;
    msg_begin << "; operation FAILED with " ;
    ThrowErrorMsgIf( ! ok_mesh_meta_data,
                     msg_begin.str() << " different MetaData");
    ThrowErrorMsgIf( ! ok_ord, msg_begin.str() <<
                     " Ordinal " <<  ord << " >= " << " size " << k.size());
    ThrowErrorMsg( msg_begin.str() << " no data");
  }

  return exists ;
}
Example #14
Bucket* Hash::readBucket(int bucketNumber){
	ByteString byteString = this->blockPersistor->readBlock(bucketNumber);

	Bucket* bucket = new Bucket(this->bucketSize);
	bucket->Hidratate(byteString);
	return bucket;
}
Example #15
// Delete a timer from the store by ID.
void TimerStore::delete_timer(TimerID id)
{
  std::map<TimerID, Timer*>::iterator it;
  it = _timer_lookup_table.find(id);
  if (it != _timer_lookup_table.end())
  {
    // The timer is still present in the store, delete it.
    Timer* timer = it->second;
    Bucket* bucket;
    size_t num_erased;

    // Delete the timer from the overdue buckets / timer wheels / heap. Try the
    // overdue bucket first, then the short wheel then the long wheel, then
    // finally the heap.
    num_erased = _overdue_timers.erase(timer);

    if (num_erased == 0)
    {
      bucket = short_wheel_bucket(timer);
      num_erased = bucket->erase(timer);

      if (num_erased == 0)
      {
        bucket = long_wheel_bucket(timer);
        num_erased = bucket->erase(timer);

        if (num_erased == 0)
        {
          std::vector<Timer*>::iterator heap_it;
          heap_it = std::find(_extra_heap.begin(), _extra_heap.end(), timer);
          if (heap_it != _extra_heap.end())
          {
            // Timer is in heap, remove it.
            _extra_heap.erase(heap_it, heap_it + 1);
            std::make_heap(_extra_heap.begin(), _extra_heap.end());
          }
          else
          {
            // We failed to remove the timer from any data structure.  Try and
            // purge the timer from all the timer wheels (we're already sure
            // that it's not in the heap).

            // LCOV_EXCL_START
            LOG_ERROR("Failed to remove timer consistently");
            purge_timer_from_wheels(timer);

            // Assert after purging, so we get a nice log detailing how the
            // purge went.
            assert(!"Failed to remove timer consistently");
            // LCOV_EXCL_STOP
          }
        }
      }
    }

    _timer_lookup_table.erase(id);
    delete timer;
  }
}
Example #16
void ZendArray::rehash() {
  memset(m_arBuckets, 0, tableSize() * sizeof(Bucket*));
  for (Bucket *p = m_pListHead; p; p = p->pListNext) {
    uint nIndex = (p->hashKey() & m_nTableMask);
    CONNECT_TO_BUCKET_LIST(p, m_arBuckets[nIndex]);
    SET_ARRAY_BUCKET_HEAD(m_arBuckets, nIndex, p);
  }
}
Example #17
void BuyDialog::on_buyButton_clicked()
{
    pr->setCount(ui->countSpinBox->value());
    pr->setPrice(pr->getPrice()*pr->getCount());
    Bucket* bk = Bucket::getInstance();
    bk->addProduct(pr);
    this->close();
}
Example #18
ZendArray::Bucket *ZendArray::find(int64 h) const {
  for (Bucket *p = m_arBuckets[h & m_nTableMask]; p; p = p->pNext) {
    if (!p->hasStrKey() && p->ikey == h) {
      return p;
    }
  }
  return NULL;
}
Example #19
  BucketArray( const field_type & f , const Bucket & k )
  {
    if (k.field_data_size(f)) {
      array_type::assign( (ScalarType*)( k.field_data_location(f,k[0]) ) ,
                          k.size() );

    }
  }
Example #20
 BucketArray( const field_type & f , const Bucket & b )
 {
   if ( b.field_data_size(f) ) {
     array_type::assign_stride(
       (ScalarType*)( b.field_data_location(f,b[0]) ),
       b.field_data_stride(f) , (typename array_type::size_type) b.size() );
   }
 }
Example #21
/**
 * Private build method, returns 1 on success, 0 otherwise
 * uint key: A non-negative key
 * uint payload: A non-negative payload
 * int l: Attempted insertions; has to be below R
 */
int SplashTable::insert(uint key, uint payload, int l, int lastBucket){
    int hashedValue;
    
    int minFilled = B; //Count of least filled bucket
    int leastFilledBucket = 0; //Index of least filled bucket
    int hitLastBucket = 0;
    
    //Loops through all hash functions
    for(int i = 0; i<h; i++){
        hashedValue = hashes[i].hash(key); //Hashes key
        
        if(hashedValue == lastBucket){
            hitLastBucket++;
            continue;
        }
        //Tests if minFilled is bigger than current bucket count
        if(buckets[hashedValue].count < minFilled){
            minFilled = buckets[hashedValue].count;
            leastFilledBucket = hashedValue;
        }
    }
    
    if(minFilled == B){
        // they were all full, we need to do a swap and recurse
        // if we've hit our recursion limit, bad news
        if (l==R){
            return 0;
        }
        
        // choose a random bucket
        int randomBucket;
        do{
            //Assigns a random bucket from the possible hash functions
            //Run only once if all hash functions hash to the same bucket
            //OR if found bucket is not the same as last attempt
            randomBucket = hashes[getRandom(0, h-1)].hash(key);
        } while (randomBucket == lastBucket && hitLastBucket != h);
        
        //Retrieves index of oldest bucket
        int index = buckets[randomBucket].getIndexOfOldest();
        
        // swap out the old value and put in the new
        int tempKey = buckets[randomBucket].keys[index];
        int tempPayload = buckets[randomBucket].payload[index];
        
        //Insert the (key, payload) in the oldest bucket
        buckets[randomBucket].insert(key, payload);
        
        //Recursive call, increment l,
        return insert(tempKey, tempPayload, ++l, randomBucket);
    } else {
        // we had room, do the storage
        Bucket *bucket = &buckets[leastFilledBucket];
        bucket->insert(key, payload);
        totalCount++; //increments table count
        return 1;
    }
}
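The doc comment and the code above amount to a cuckoo-style scheme: each key has h candidate buckets, the least-filled candidate wins, and if every candidate is full a victim from a randomly chosen candidate bucket is evicted and reinserted, giving up after R displacement attempts. Below is a stripped-down, self-contained sketch of that evict-and-retry loop with two hash functions and single-slot buckets (hypothetical names and hash constants, not the SplashTable API):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Slot { bool used = false; uint32_t key = 0, payload = 0; };

struct MiniCuckoo {
  std::vector<Slot> table;
  explicit MiniCuckoo(size_t n) : table(n) {}

  size_t h1(uint32_t k) const { return (k * 2654435761u) % table.size(); }
  size_t h2(uint32_t k) const { return (k ^ (k >> 16)) % table.size(); }

  // Returns true on success, false once the displacement limit R is hit,
  // mirroring the 1/0 contract of SplashTable::insert.
  bool insert(uint32_t key, uint32_t payload, int attempts = 0) {
    const int R = 16;
    if (attempts == R) return false;

    const size_t a = h1(key), b = h2(key);
    if (!table[a].used) { table[a] = {true, key, payload}; return true; }
    if (!table[b].used) { table[b] = {true, key, payload}; return true; }

    // Both candidates occupied: evict one occupant, store the new pair in its
    // place, and recursively reinsert the evicted pair (the "swap" above).
    const size_t victim = (attempts % 2 == 0) ? a : b;
    const Slot old = table[victim];
    table[victim] = {true, key, payload};
    return insert(old.key, old.payload, attempts + 1);
  }
};

int main() {
  MiniCuckoo t(8);
  for (uint32_t k = 1; k <= 6; ++k)
    std::printf("insert %u -> %s\n", (unsigned)k, t.insert(k, k * 10) ? "ok" : "gave up");
  return 0;
}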
Example #22
static void checkBuckets( BulkData& mesh)
{
  const BucketVector & buckets = mesh.buckets(NODE_RANK);
  for (unsigned i=0; i < buckets.size(); i++)
    {
      Bucket* bucket = buckets[i];
      ASSERT_TRUE(bucket->assert_correct());
    }
}
Example #23
static void checkBuckets( BulkData& mesh)
{
  const std::vector<Bucket*> & buckets = mesh.buckets(0);
  for (unsigned i=0; i < buckets.size(); i++)
    {
      Bucket* bucket = buckets[i];
      STKUNIT_ASSERT(bucket->assert_correct());
    }
}
Example #24
Bucket* createBucket(int cellNo, int offsetID) {
	Bucket *ret = new Bucket(cellNo);

	for (int i = 0; i < cellNo; i++) {
		ret->insertBucketItem(createBucketItem(i + (offsetID + 1)));
	}

	return ret;
}
Example #25
File: Bucket.cpp  Project: zdgeorgiev/FMI
Bucket Bucket::operator/(int size)
{
	Bucket newBucket = Bucket(this->owner);

	for (int i = 0; i < this->elementsCount; i++)
		newBucket.addEgg(Egg(this->container[i].getName(), this->container[i].getSize() / size));

	return newBucket;
}
Example #26
/**
 * Calculates the degree distribution of a graph and visualizes it using GNUPLOT.
 * Only considers out_degree.
 */
void degreeDistribution(Graph *g, bool checkFlag) {
	int nodeNo = g->getNodeNo();

	// Initialize an array of N elements to zeros (holding P(k) for k in [0, N-1]).
	double totalCounters[nodeNo];
	for (int i = 0; i < nodeNo; i++)
		totalCounters[i] = 0;

	// Traverse HashTable and update all counters.
	LinearHashTable *hashTable = g->getHashTable();
	BucketList **bucketListTable = hashTable->getTable();
	for (int i = 0; i < hashTable->getCurrentBucketNo(); i++) {
		BucketList *bucketList = bucketListTable[i];
		BucketListItem *cur = (BucketListItem*) bucketList->getHead();
		while (cur) {
			Bucket *bucket = cur->getElement();
			// Count neighbors and update appropriate counter.
			for (int j = 0; j < bucket->getSize(); j++) {
				BucketItem *bi = bucket->getBucketItemByIndex(j);
				int count = bi->getEdgeList()->getSize();
				totalCounters[count]++;
			}

			cur = (BucketListItem*) cur->getNext();
		}
	}

	// Divide each counter by N.
	for (int i = 0; i < nodeNo; i++)
		totalCounters[i] = totalCounters[i] / nodeNo;

	if (checkFlag == true) {
		assert(int(totalCounters[0] * 10000) == 9898);
		assert(int(totalCounters[98] * 10000) == 101);
	}

	// Make data file.
	ofstream myFile;
	const char *filename = "graph_metrics/gnuplot/bin/data.txt";
	myFile.open(filename);
	if (myFile.fail()) {
		cout << "ERROR: Unable to open " << filename << endl;
		return;
	}

	myFile << "# K       P(K)" << endl;
	for (int i = 0; i < nodeNo; i++)
		myFile << i << "      " << 100 * totalCounters[i] << endl;
	myFile.close();

	// Run gnuplot to display graph.
#ifdef  __CYGWIN__
	system("cd graph_metrics/gnuplot/bin; ./gnuplot.exe degree.gnu");
#else
	system("cd graph_metrics/gnuplot/bin; wine gnuplot.exe degree.gnu");
#endif
}
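Stripped of the hash-table traversal, the computation above is: count how many nodes have out-degree k, then divide every counter by N to get P(k). Below is a minimal standalone version of that calculation over a plain adjacency list, independent of the Graph / LinearHashTable / BucketList classes used above:

#include <cstdio>
#include <vector>

int main() {
  // Adjacency list: adj[i] holds the out-neighbours of node i.
  std::vector<std::vector<int>> adj = {
      {1, 2}, {2}, {}, {0, 1, 2}, {2}};
  const size_t n = adj.size();

  // counters[k] = number of nodes whose out-degree is exactly k.
  std::vector<double> counters(n, 0.0);
  for (const auto& neighbours : adj)
    counters[neighbours.size()]++;

  // P(k) = counters[k] / N, exactly as in degreeDistribution().
  for (size_t k = 0; k < n; ++k)
    if (counters[k] > 0)
      std::printf("P(%zu) = %.3f\n", k, counters[k] / n);
  return 0;
}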
Example #27
HOT_FUNC_HPHP
Variant ZendArray::getKey(ssize_t pos) const {
  ASSERT(pos && pos != ArrayData::invalid_index);
  Bucket *p = reinterpret_cast<Bucket *>(pos);
  if (p->hasStrKey()) {
    return p->skey;
  }
  return (int64)p->ikey;
}
Example #28
int HashTable::insert(unsigned int key, int data)
{
    unsigned hashed_key;
    hashed_key = hashFunction(key);
    int index = getBucketIndex(hashed_key, globalDepth); // look at the globalDepth rightmost bits of the hash to decide which directory index to use
    Bucket* tempBucket = bucketArray[index];
    if(tempBucket->empty == true)
    {
        cout << "==============================================" << endl;
        cout << "Bucket is empty. Inserting data with key -> " << key << endl;
        cout << "==============================================" << endl;

        tempBucket->insert(key, data);
    }
    else
    {
        if(tempBucket->key == key)
        {
            // #TODO:10 Array of tids
        }
        else
        {
            cout << "==============================================" << endl;
            cout << "Trying to insert different key in full bucket " << endl;
            cout << "==============================================" << endl;

            // #DONE:40 Update with local and globalDepth
            unsigned bhashed_key = hashFunction(tempBucket->key);  // Bucket hashed key
            while(getBucketIndex(bhashed_key, globalDepth) == getBucketIndex(hashed_key, globalDepth))
            {
                doubleTableSize();
                cout << "DOUBLEING" << endl;
            }
            int index2 = getBucketIndex(bhashed_key, globalDepth);

            cout << "To neo index tou yparxodos bucket: " << index2 << endl;
            cout << "EKEI POU THA PAW = " << index2 << endl;
            cout << "EKEI POU HMOYN = " << index << endl;

            index = getBucketIndex(hashed_key, globalDepth);

            cout << "To neo index tou neou bucket: " << index << endl;
            if(index != index2)
            {
                bucketArray[index] = new Bucket(key, data, globalDepth);     // #DONE:10 New constructor
                bucketArray[index2]->localDepth++;
            }
            else
            {
                cout << "Error: HashTable::insert" << endl;
                return -1;
            }
        }
    }
    return 1;
}
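The comment on the first getBucketIndex call explains that the directory index is just the globalDepth rightmost bits of the hashed key; that is the usual extendible-hashing directory lookup, and doubling the table simply means using one more bit. Below is a one-line sketch of that masking (a hypothetical helper; the project's actual getBucketIndex is not shown here):

#include <cassert>

// Use the `depth` least-significant bits of the hashed key as the directory
// index; after the directory doubles, the same key just keeps one more bit.
static unsigned bucketIndex(unsigned hashedKey, unsigned depth) {
  return hashedKey & ((1u << depth) - 1);
}

int main() {
  assert(bucketIndex(0b101101u, 3) == 0b101u);   // low 3 bits
  assert(bucketIndex(0b101101u, 4) == 0b1101u);  // one extra bit after doubling
  return 0;
}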
Example #29
BucketList* createBucketList(int nodeNo, int cellNo) {
	BucketList *bucketList = new BucketList(cellNo);
	Bucket *temp;
	for (int i = 0; i < nodeNo; i++) {
		temp = createBucket(cellNo, i * cellNo);
		for (int j = 0; j < temp->getSize(); j++)
			bucketList->insert(temp->getBucketItemByIndex(j));
	}
	return bucketList;
}
Example #30
Variant ZendArray::key() const {
  if (m_pos) {
    Bucket *p = reinterpret_cast<Bucket *>(m_pos);
    if (p->hasStrKey()) {
      return p->skey;
    }
    return (int64)p->ikey;
  }
  return null;
}