POINTER smalloc_malloc P1(size_t, size) #endif { /* int i; */ u *temp; DEBUG_CHECK(size == 0, "Malloc size 0.\n"); if (size > SMALL_BLOCK_MAX_BYTES) return large_malloc(size, 0); #if SIZEOF_PTR > SIZEOF_INT if (size < SIZEOF_PTR) size = SIZEOF_PTR; #endif size = (size + 7) & ~3; /* block size in bytes */ #define SIZE_INDEX(u_array, size) (*(u*) ((char*)u_array-8+size)) #define SIZE_PNT_INDEX(u_array, size) (*(u**)((char*)u_array-8+size)) /* i = (size - 8) >> 2; */ count_up(small_alloc_stat, size); SIZE_INDEX(small_count, size) += 1; /* update statistics */ SIZE_INDEX(small_total, size) += 1; if (SIZE_INDEX(small_count, size) > SIZE_INDEX(small_max, size)) SIZE_INDEX(small_max, size) = SIZE_INDEX(small_count, size); if (temp = SIZE_PNT_INDEX(sfltable, size)) { /* allocate from the * free list */ count_back(small_free_stat, size); temp++; SIZE_PNT_INDEX(sfltable, size) = *(u **) temp; fake("From free list."); return (char *) temp; } /* else allocate from the chunk */ if (unused_size < size) { /* no room in chunk, get another */ /* * don't waste this smaller block */ if (unused_size) { count_up(small_free_stat, unused_size); *s_size_ptr(next_unused) = unused_size >> 2; *s_next_ptr(next_unused) = SIZE_PNT_INDEX(sfltable, unused_size); SIZE_PNT_INDEX(sfltable, unused_size) = next_unused; } fake("Allocating new small chunk."); next_unused = (u *) large_malloc(SMALL_CHUNK_SIZE + SIZEOF_PTR, 1); if (next_unused == 0) return 0; *next_unused = (u) last_small_chunk; last_small_chunk = next_unused++; count_up(small_chunk_stat, SMALL_CHUNK_SIZE + SIZEOF_PTR); unused_size = SMALL_CHUNK_SIZE; } else
std::shared_ptr<Node> LODNode::clone(){ std::shared_ptr<LODNode> ret(new LODNode(count_up(mName))); mGlobalMutex.lock(); ret->mID = mGlobalID; mGlobalID++; mGlobalMutex.unlock(); mMutex.lock(); ret->mName = mName; ret->mParentNode = mParentNode; ret->mTranslation = mTranslation; ret->mScale = mScale; ret->mTRS = mTRS; ret->mHigh = mHigh; ret->mMiddle = mMiddle; ret->mLow= mLow; ret->mHighTresh = mHighTresh; ret->mLowTresh = mLowTresh; for(auto iter = mChildNodesID.begin(); iter != mChildNodesID.end(); iter++){ ret->add(iter->second->clone()); } mMutex.unlock(); return ret; }
/*
 * Count between the first and second input.
 * Counts down when input1 is greater than input2; otherwise
 * (including when the two inputs are equal) counts up.
 */
void count_between( int input1, int input2 )
{
    if( input1 > input2 ) {
        count_down( input1, input2 );
        return;
    }

    count_up( input1, input2 );
}
SharedObject& SharedObject::operator=(const SharedObject& ref)
{
    // If both pointers already refer to the same node (including
    // self-assignment), the reference count must not change — do nothing.
    if (node != ref.node) {
        // decrease the counter and delete if this was the last pointer
        count_down();
        // adopt the new node and register this object as an owner
        node = ref.node;
        count_up();
    }
    return *this;
}
/*
 * Program entry point: repeatedly read two numbers and exercise the
 * counting routines until the user chooses to quit.
 */
int main( int argc, char **argv )
{
    int input1;
    int input2;

    for( ;; ) {
        get_inputs( &input1, &input2 );

        count_up( 0, input1 );
        count_down( input1, 0 );
        count_between( input1, input2 );

        /* ask the user whether to go again; leave the loop otherwise */
        if( !quit_or_rerun() ) {
            break;
        }
    }

    return 0;
}
/*
 * Program entry point: read one number, count up from MIN to it, then
 * count back down from it to MIN.
 */
int main( int argc, char **argv )
{
    int input = get_input();

    count_up( MIN, input );
    /* BUG FIX: the second argument was the string literal "hello world",
     * which is not an int (count_down takes two ints elsewhere in this
     * file); count back down to MIN to mirror the count_up call above. */
    count_down( input, MIN );

    return 0;
}
void SharedObject::assignNode(SharedObjectNode* node_)
{
    // BUG FIX: re-assigning the node we already point at must be a no-op.
    // Without this guard, count_down() could drop the last reference
    // (deleting the node) before count_up() re-counted the now-dangling
    // pointer. operator=() already performs the equivalent check.
    if (node == node_)
        return;

    // release the current node (deleted if this was the last pointer)
    count_down();
    // adopt the new node and account for our reference to it
    node = node_;
    count_up();
}
// Copy constructor: share ref's node and register this object as an
// additional owner by bumping the reference count.
SharedObject::SharedObject(const SharedObject& ref)
    : node(ref.node)
{
    count_up();
}