void write() {
	UpLock uplock(mutex_);
	WriteLock write(uplock);

	resource = 100;

	std::cerr << "resource => " << resource << "\n";
	std::cerr << "written!\n";
}
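
The snippet above depends on names that are not shown here (UpLock, WriteLock, mutex_, resource). Below is a minimal, self-contained sketch, assuming UpLock and WriteLock are typedefs for boost::upgrade_lock<boost::shared_mutex> and boost::upgrade_to_unique_lock<boost::shared_mutex>, and modelling the member mutex_ and resource as globals for brevity:

#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <iostream>

// Assumed typedefs; the original class definitions are not shown.
typedef boost::upgrade_lock<boost::shared_mutex> UpLock;
typedef boost::upgrade_to_unique_lock<boost::shared_mutex> WriteLock;

boost::shared_mutex mutex_;   // stand-in for the original member mutex
int resource = 0;             // stand-in for the original member resource

void write() {
	UpLock uplock(mutex_);    // take upgrade ownership (compatible with readers)
	WriteLock write(uplock);  // atomically upgrade to exclusive for the write

	resource = 100;
	std::cerr << "resource => " << resource << "\n";
	std::cerr << "written!\n";
}

int main() {
	write();
	return 0;
}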
Example #2
void _sweep(struct mempage_heap * _heap){
	// Upgrade (shared-with-readers) locks on the free list and the old-recover
	// list; each is raised to exclusive only around the resize calls below.
	boost::upgrade_lock<boost::shared_mutex> lock(_heap->_free_mu);
	boost::upgrade_lock<boost::shared_mutex> uplock(_heap->_old_recover_mu);
	
	// Sweep the recover list: free chunks move to the free list, old chunks
	// move to the old-recover list, everything else stays put.
	for(unsigned int i = 0; i < _heap->_recover_slide; ){
		if (_isfree(_heap->_recover[i])){
			unsigned int slide = _heap->_free_slide++;
			if (slide >= _heap->_free_max){
				// Temporarily upgrade to exclusive for the resize; upgrade
				// ownership returns to 'lock' when this scope ends, so the
				// loop can upgrade again on a later iteration.
				boost::upgrade_to_unique_lock<boost::shared_mutex> uniquelock(lock);
				_resize_freelist(_heap);
			}
			_heap->_free[slide] = _heap->_recover[i];
			_heap->_free[slide]->slide = sizeof(struct chunk);
			_heap->_free[slide]->rec_count = 0;

			// Compact: swap the last recover entry into slot i (i is not advanced).
			_heap->_recover[i] = _heap->_recover[--_heap->_recover_slide];

		}else{
			if (_isoldchunk(_heap->_recover[i])){
				unsigned int slide = _heap->_old_recover_slide++;
				if (slide >= _heap->_old_recover_max){
					// Temporary exclusive upgrade of the old-recover lock;
					// 'uplock' regains upgrade ownership when this scope ends.
					boost::upgrade_to_unique_lock<boost::shared_mutex> uniquelock(uplock);
					_resize_oldrecvlist(_heap);
				}
				_heap->_old_recover[slide] = _heap->_recover[i];

				_heap->_recover[i] = _heap->_recover[--_heap->_recover_slide];

			}else{
				i++;
			}
		}
	}

	// Once the old-recover list grows past a threshold, drain any chunks that
	// have since become free back into the free list under an exclusive upgrade.
	if (_heap->_old_recover_slide.load() > 1024){
		boost::upgrade_to_unique_lock<boost::shared_mutex> uniquelock(uplock);
		for(unsigned int i = 0; i < _heap->_old_recover_slide; ){
			if (_isfree(_heap->_old_recover[i])){
				unsigned int slide = _heap->_free_slide++;
				if (slide >= _heap->_free_max){
					// Exclusive upgrade of the free-list lock for the resize
					// (named differently to avoid shadowing the outer lock).
					boost::upgrade_to_unique_lock<boost::shared_mutex> freelock(lock);
					_resize_freelist(_heap);
				}
				// Return the chunk to the free list (this loop walks _old_recover).
				_heap->_free[slide] = _heap->_old_recover[i];

				_heap->_old_recover[i] = _heap->_old_recover[--_heap->_old_recover_slide];

			}else{
				i++;
			}
		}
	}
}
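
For reference, the scoped-upgrade idiom used around the resize calls above can be exercised on its own. The sketch below is a hypothetical driver (free_mu, free_list, and grow_if_needed are invented for illustration); it shows that boost::upgrade_to_unique_lock hands upgrade ownership back to the upgrade_lock at the end of its scope, so the same lock can be upgraded again on a later pass:

#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <cstddef>
#include <vector>

// Hypothetical shared structure, guarded the same way as _heap->_free / _free_mu.
boost::shared_mutex free_mu;
std::vector<int> free_list;

void grow_if_needed(std::size_t wanted) {
	// Upgrade ownership: concurrent readers are allowed, but only one upgrader.
	boost::upgrade_lock<boost::shared_mutex> lock(free_mu);

	for (std::size_t pass = 0; pass < 4; ++pass) {
		if (free_list.capacity() < wanted) {
			// Exclusive only for the resize; when 'unique' is destroyed,
			// 'lock' regains upgrade ownership and can be upgraded again.
			boost::upgrade_to_unique_lock<boost::shared_mutex> unique(lock);
			free_list.reserve(wanted);
		}
		wanted *= 2;  // pretend the next pass needs more room
	}
}

int main() {
	grow_if_needed(8);
	return 0;
}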