FIFOAllocator::Page *FIFOAllocator::alloc_page_()
 {
   Page *ret = NULL;
   if (total_limit_ >= ATOMIC_ADD(&allocated_size_, page_size_))
   {
     free_list_.pop(ret);
     if (NULL == ret)
     {
       ret = (Page*)ob_malloc(page_size_ + sizeof(Page), mod_id_);
       if (NULL == ret)
       {
         TBSYS_LOG(WARN, "alloc from system fail size=%ld", page_size_ + sizeof(Page));
       }
     }
     if (NULL != ret)
     {
       ret->ref_cnt = 0;
       ret->pos = 0;
     }
     else
     {
       ATOMIC_ADD(&allocated_size_, -page_size_);
     }
   }
   else
   {
     ATOMIC_ADD(&allocated_size_, -page_size_);
   }
   return ret;
 }
Example #2
void ParticleEngine::_step(ThreadArg &arg) {
	clock_t last = TIME_MS();
	uint fid = (uint) arg.id;
	uint particlePerThread = m_part_nbr / m_threadNb;
	uint start = fid * particlePerThread;
	uint end = (fid + 1) * particlePerThread;

	(*this.*mParticleStep)(start, end);

	if (arg.id == 0) {
		++m_frameNb;
		if (m_hasWave)
			moveWave();
		if (m_randomCursor > -LIMIT_Y + m_randomSpeed)
			m_randomCursor += m_randomSpeed;
//        if (m_part_nbr < m_max_part_nbr - 100 && m_isAuto)
//        	m_part_nbr += 100;
	}
	++(arg.frame_nb);
	clock_t current = TIME_MS();
	clock_t delta = current - last;

	if (delta < MAX_FRAME_TIME_MS && delta >= 0) { // slow down if the frame finished too fast
		//LOGI("CPU %d ms too fast\n", MAX_FRAME_TIME_MS - delta);
		//Utils::atomicAdd(&m_timeSlept, (MAX_FRAME_TIME_MS - delta));
		int frame = MAX_FRAME_TIME_MS - delta;
		ATOMIC_ADD(&m_timeSlept, frame);
		usleep(1000 * frame);
	}

}
 void *FIFOAllocator::alloc(const int64_t size)
 {
   void *ret = NULL;
   uint64_t id = UINT64_MAX;
   if (!inited_)
   {
     TBSYS_LOG(WARN, "have not inited, this=%p", this);
   }
   else if (0 >= size
            || page_size_ < size + (int64_t)sizeof(id))
   {
     TBSYS_LOG(WARN, "invalid param, size=%ld", size);
   }
   else
   {
     Page *page = get_page_(size + sizeof(id), id);
     if (NULL != page)
     {
       *(uint64_t*)(page->buf + page->pos) = id;
       ret = page->buf + page->pos + sizeof(id);
       ATOMIC_ADD(&(page->ref_cnt), 1);
       page->pos += (uint32_t)(size + sizeof(id));
     }
   }
   return ret;
 }
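alloc() above writes the page's id_map_ id into the eight bytes immediately before the pointer it hands out, which is what later lets the matching free path recover which page an allocation came from. A stand-alone sketch of that header-before-pointer technique follows; tagged_alloc and tagged_release are hypothetical names, not part of the original allocator.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Store a 64-bit tag just in front of the payload and return the payload address. */
void *tagged_alloc(uint64_t tag, size_t size)
{
  char *buf = (char *)malloc(sizeof(uint64_t) + size);
  if (NULL == buf)
  {
    return NULL;
  }
  memcpy(buf, &tag, sizeof(tag));        /* header lives before the user pointer */
  return buf + sizeof(uint64_t);
}

/* Recover the tag written at allocation time, then release the block. */
uint64_t tagged_release(void *ptr)
{
  char *buf = (char *)ptr - sizeof(uint64_t);
  uint64_t tag;
  memcpy(&tag, buf, sizeof(tag));
  free(buf);
  return tag;
}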
Example #4
static void *easy_mempool_alloc_(easy_mempool_t *pool, uint32_t size, uint32_t align_size)
{
    void                    *ret = NULL;
    int32_t                 alloc_size = size + sizeof(easy_mempool_buf_t);
    alloc_size = easy_mempool_align(alloc_size, align_size);

    if (NULL != pool) {
        if ((pool->mem_total + size) > pool->mem_limit) {
            // memory over limit
        } else if (pool->page_size < alloc_size) {
            easy_mempool_buf_t      *buf = (easy_mempool_buf_t *)pool->allocator->memalign(align_size, alloc_size);

            if (NULL != buf) {
                buf->magic_num = EASY_MEMPOOL_BUF_MAGIC_NUM;
                buf->alloc_type = EASY_MEMPOOL_DIRECT_ALLOC;
                buf->size = size;
                ret = (char *)buf + sizeof(easy_mempool_buf_t);
                ATOMIC_INC(&(pool->direct_alloc_cnt));
            }
        } else {
            easy_mempool_page_t     *page = NULL;
            easy_mempool_buf_t      *buf = NULL;
            int32_t                 page_pos = -1;

            while (1) {
                if (NULL == (page = easy_mempool_get_cur_page_(pool, alloc_size, &page_pos))) {
                    break;
                }

                buf = (easy_mempool_buf_t *)easy_mempool_align_ptr(easy_mempool_alloc_from_page_(page, pool->page_size, alloc_size), align_size);

                if (NULL != buf) {
                    ATOMIC_INC(&(pool->page_metas[page_pos].ref_cnt));
                }

                easy_mempool_deref_page_(pool, page_pos);

                if (NULL != buf) {
                    buf->magic_num = EASY_MEMPOOL_BUF_MAGIC_NUM;
                    buf->alloc_type = EASY_MEMPOOL_ALLOC;
                    buf->page_pos = page_pos;
                    buf->size = size;
                    ret = (char *)buf + sizeof(easy_mempool_buf_t);
                    break;
                }
            }
        }

        if (NULL != ret) {
            ATOMIC_ADD(&(pool->mem_total), size);
        }
    }

    return ret;
}
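Note that this pool charges ATOMIC_ADD(&(pool->mem_total), size) only after an allocation has succeeded, and the mem_limit test reads mem_total without any reservation, so concurrent callers can each pass the check and briefly overshoot the limit. Below is a minimal sketch of that charge-on-success accounting (the opposite order from the reserve-then-roll-back in the FIFOAllocator::alloc_page_ snippet at the top of this listing); g_mem_total, g_mem_limit and limited_alloc are illustrative names.

#include <stdint.h>
#include <stdlib.h>

#define ATOMIC_ADD(ptr, val) __sync_add_and_fetch((ptr), (val))

static uint64_t g_mem_total = 0;               /* bytes handed out so far */
static const uint64_t g_mem_limit = 4 << 20;   /* hypothetical 4 MiB cap  */

void *limited_alloc(uint32_t size)
{
    void *ret = NULL;

    /* Approximate admission test: racing threads may each pass it. */
    if (g_mem_total + size <= g_mem_limit) {
        ret = malloc(size);

        if (NULL != ret) {
            ATOMIC_ADD(&g_mem_total, size);    /* charge only on success */
        }
    }

    return ret;
}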
 void FIFOAllocator::free_page_(Page *ptr)
 {
   if (NULL != ptr)
   {
     ATOMIC_ADD(&allocated_size_, -page_size_);
     if (hold_limit_ <= (free_list_.get_total() * page_size_)
         || OB_SUCCESS != free_list_.push(ptr))
     {
       ob_free(ptr);
     }
   }
 }
Example #6
static int
async_callback(WT_ASYNC_CALLBACK *cb,
    WT_ASYNC_OP *op, int wiredtiger_error, uint32_t flags)
{
	ASYNC_KEYS *asynckey = (ASYNC_KEYS *)cb;
	WT_ASYNC_OPTYPE type;
	WT_ITEM k, v;
	const char *key, *value;
	uint64_t id;
	int ret;

	(void)flags;				/* Unused */

	ret = 0;

	/*! [async get type] */
	/* Retrieve the operation's WT_ASYNC_OPTYPE type. */
	type = op->get_type(op);
	/*! [async get type] */

	/*! [async get identifier] */
	/* Retrieve the operation's 64-bit identifier. */
	id = op->get_id(op);
	/*! [async get identifier] */

	/* Check for a WiredTiger error. */
	if (wiredtiger_error != 0) {
		fprintf(stderr,
		    "ID %" PRIu64 " error %d: %s\n",
		    id, wiredtiger_error,
		    wiredtiger_strerror(wiredtiger_error));
		global_error = wiredtiger_error;
		return (1);
	}

	/* If doing a search, retrieve the key/value pair. */
	if (type == WT_AOP_SEARCH) {
		/*! [async get the operation's string key] */
		ret = op->get_key(op, &k);
		key = k.data;
		/*! [async get the operation's string key] */
		/*! [async get the operation's string value] */
		ret = op->get_value(op, &v);
		value = v.data;
		/*! [async get the operation's string value] */
		ATOMIC_ADD(asynckey->num_keys, 1);
		printf("Id %" PRIu64 " got record: %s : %s\n", id, key, value);
	}
	return (ret);
}
 void FIFOAllocator::revert_page_(const uint64_t id, void *ptr)
 {
   Page *page = NULL;
   id_map_.get(id, page);
   if (NULL == page)
   {
     TBSYS_LOG(ERROR, "get page fail, maybe double free, ptr=%p id=%lu", ptr, id);
   }
   else
   {
     if (0 == ATOMIC_ADD(&(page->ref_cnt), -1))
     {
       id_map_.erase(id);
       free_page_(page);
     }
   }
 }
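revert_page_ relies on ATOMIC_ADD returning the decremented value, so exactly one caller observes the count reach zero and takes responsibility for releasing the page. A minimal sketch of that decrement-and-test release pattern follows; refcounted and put_ref are illustrative names, and ATOMIC_ADD is again assumed to act like __sync_add_and_fetch.

#include <stdint.h>
#include <stdlib.h>

#define ATOMIC_ADD(ptr, val) __sync_add_and_fetch((ptr), (val))

typedef struct refcounted
{
  int64_t ref_cnt;   /* starts at 1 for the creator, +1 per additional user */
  /* payload ... */
} refcounted;

void put_ref(refcounted *obj)
{
  if (0 == ATOMIC_ADD(&obj->ref_cnt, -1))
  {
    free(obj);       /* the caller that saw zero owns the teardown */
  }
}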
Example #8
static int
cb_asyncop(WT_ASYNC_CALLBACK *cb, WT_ASYNC_OP *op, int ret, uint32_t flags)
{
	ASYNC_KEYS *asynckey = (ASYNC_KEYS *)cb;
	WT_ASYNC_OPTYPE type;
	WT_ITEM k, v;
	const char *key, *value;
	uint64_t id;
	int t_ret;

	(void)flags;
	/*! [Get type] */
	type = op->get_type(op);
	/*! [Get type] */
	/*! [Get identifier] */
	id = op->get_id(op);
	/*! [Get identifier] */
	t_ret = 0;
	if (ret != 0) {
		printf("ID %" PRIu64 " error %d\n", id, ret);
		global_error = ret;
		return (1);
	}
	if (type == WT_AOP_SEARCH) {
		/*! [Get the operation's string key] */
		t_ret = op->get_key(op, &k);
		key = k.data;
		/*! [Get the operation's string key] */
		/*! [Get the operation's string value] */
		t_ret = op->get_value(op, &v);
		value = v.data;
		ATOMIC_ADD(asynckey->num_keys, 1);
		/*! [Get the operation's string value] */
		printf("Id %" PRIu64 " got record: %s : %s\n", id, key, value);
	}
	return (t_ret);
}
Example #9
void *easy_mempool_thread_realloc(void *ptr, size_t size)
{
    void                    *ret = NULL;
    void                    *alloc_ptr = NULL;

    easy_mempool_thread_info_t *thread_info = (easy_mempool_thread_info_t *)pthread_getspecific(easy_mempool_g_thread_key);

    if (NULL == thread_info) {
        thread_info = (easy_mempool_thread_info_t *)easy_mempool_g_allocator.memalign(EASY_MEMPOOL_ALIGNMENT,
                      sizeof(easy_mempool_thread_info_t));

        if (NULL != thread_info) {
            thread_info->ref_cnt = 1;
            thread_info->pool = easy_mempool_create(0);
        }

        pthread_setspecific(easy_mempool_g_thread_key, thread_info);
    }

    if (0 != size
            && NULL != thread_info
            && easy_mempool_g_thread_memlimit >= (int64_t)(easy_mempool_g_thread_memtotal + size)) {
        alloc_ptr = easy_mempool_alloc(thread_info->pool, size + sizeof(easy_mempool_thread_info_t *));

        if (NULL != alloc_ptr) {
            *(easy_mempool_thread_info_t **)(alloc_ptr) = thread_info;
            ret = (char *)alloc_ptr + sizeof(easy_mempool_thread_info_t *);
            ATOMIC_INC(&(thread_info->ref_cnt));
            ATOMIC_ADD(&easy_mempool_g_thread_memtotal, size);
        }
    }

    if (NULL != ptr && NULL != ret) {
        easy_mempool_buf_t      *buf = (easy_mempool_buf_t *)((char *)ptr -
                                       sizeof(easy_mempool_thread_info_t *) - sizeof(easy_mempool_buf_t));

        if (NULL == buf) {
            easy_mempool_free(thread_info->pool, alloc_ptr);
        } else {
            memcpy(ret, ptr, buf->size > size ? size : buf->size);
        }
    }

    if (NULL != ptr) {
        ptr = (char *)ptr - sizeof(easy_mempool_thread_info_t *);
        easy_mempool_buf_t      *buf = (easy_mempool_buf_t *)((char *)ptr - sizeof(easy_mempool_buf_t));
        easy_mempool_thread_info_t *host = *(easy_mempool_thread_info_t **)ptr;

        if (NULL != buf) {
            ATOMIC_SUB(&easy_mempool_g_thread_memtotal, buf->size - sizeof(easy_mempool_thread_info_t *));
        }

        if (NULL != host) {
            easy_mempool_free(host->pool, ptr);

            if (0 == ATOMIC_DEC_FETCH(&(host->ref_cnt))) {
                easy_mempool_destroy(host->pool);
                easy_mempool_g_allocator.free(host);
            }
        }
    }

    return ret;
}
Example #10
/* Retrieve an ID for the next insert operation. */
int get_next_op(uint64_t *op)
{
	*op = ATOMIC_ADD(g_npop_ops, 1);
	return (0);
}
Example #11
void
worker(CONFIG *cfg, uint32_t worker_type)
{
	WT_CONNECTION *conn;
	WT_SESSION *session;
	WT_CURSOR *cursor;
	const char *op_name = "search";
	char *data_buf, *key_buf, *value;
	int ret, op_ret;
	uint64_t next_incr, next_val;

	session = NULL;
	data_buf = key_buf = NULL;
	op_ret = 0;

	conn = cfg->conn;
	key_buf = calloc(cfg->key_sz + 1, 1);
	if (key_buf == NULL) {
		lprintf(cfg, ret = ENOMEM, 0, "Populate key buffer");
		goto err;
	}
	if (worker_type == WORKER_INSERT || worker_type == WORKER_UPDATE) {
		data_buf = calloc(cfg->data_sz, 1);
		if (data_buf == NULL) {
			lprintf(cfg, ret = ENOMEM, 0, "Populate data buffer");
			goto err;
		}
		memset(data_buf, 'a', cfg->data_sz - 1);
	}

	if ((ret = conn->open_session(conn, NULL, NULL, &session)) != 0) {
		lprintf(cfg, ret, 0,
		    "open_session failed in read thread");
		goto err;
	}
	if ((ret = session->open_cursor(session, cfg->uri,
	    NULL, NULL, &cursor)) != 0) {
		lprintf(cfg, ret, 0,
		    "open_cursor failed in read thread");
		goto err;
	}

	while (g_running) {
		/* Get a value in range, avoid zero. */
		if (worker_type == WORKER_INSERT)
			next_incr = ATOMIC_ADD(g_nins_ops, 1);

		if (!F_ISSET(cfg, PERF_RAND_WORKLOAD) &&
		    worker_type == WORKER_INSERT)
			next_val = cfg->icount + next_incr;
		else
			next_val = wtperf_rand(cfg);
		/*
		 * If the workload is started without a populate phase we
		 * rely on at least one insert to get a valid item id.
		 */
		if (worker_type != WORKER_INSERT &&
		    wtperf_value_range(cfg) < next_val)
			continue;
		sprintf(key_buf, "%0*" PRIu64, cfg->key_sz, next_val);
		cursor->set_key(cursor, key_buf);
		switch(worker_type) {
		case WORKER_READ:
			op_name = "read";
			op_ret = cursor->search(cursor);
			if (F_ISSET(cfg, PERF_RAND_WORKLOAD) &&
			    op_ret == WT_NOTFOUND)
				op_ret = 0;
			if (op_ret == 0)
				++g_nread_ops;
			break;
		case WORKER_INSERT_RMW:
			op_name="insert_rmw";
			op_ret = cursor->search(cursor);
			if (op_ret != WT_NOTFOUND)
				break;
			/* Fall through */
		case WORKER_INSERT:
			op_name = "insert";
			cursor->set_value(cursor, data_buf);
			op_ret = cursor->insert(cursor);
			if (F_ISSET(cfg, PERF_RAND_WORKLOAD) &&
			    op_ret == WT_DUPLICATE_KEY)
				op_ret = 0;
			if (op_ret != 0)
				++g_nfailedins_ops;
			break;
		case WORKER_UPDATE:
			op_name = "update";
			op_ret = cursor->search(cursor);
			if (op_ret == 0) {
				cursor->get_value(cursor, &value);
				memcpy(data_buf, value, cfg->data_sz);
				if (data_buf[0] == 'a')
					data_buf[0] = 'b';
				else
					data_buf[0] = 'a';
				cursor->set_value(cursor, data_buf);
				op_ret = cursor->update(cursor);
			}
			if (F_ISSET(cfg, PERF_RAND_WORKLOAD) &&
			    op_ret == WT_NOTFOUND)
				op_ret = 0;
			if (op_ret == 0)
				++g_nupdate_ops;
			break;
		default:
			lprintf(cfg, EINVAL, 0, "Invalid worker type");
			goto err;
		}

		/* Report errors and continue. */
		if (op_ret != 0)
			lprintf(cfg, op_ret, 0,
			    "%s failed for: %s", op_name, key_buf);
		else
			++g_nworker_ops;
	}

err:	if (ret != 0)
		++g_threads_quit;
	if (session != NULL)
		session->close(session, NULL);
	if (data_buf != NULL)
		free(data_buf);
	if (key_buf != NULL)
		free(key_buf);
}
Example #12
File: flowing.cpp  Project: jjcook/velour
static void flow_private_graph(SeqGraph *private_sgraph, SeqGraph *resident_sgraph, SplitBuckets *buckets, uintptr_t round)
{
    // TODO: histogram of initial private_sgraph size, etc etc

    /*if (g__DOTGRAPH) {
      char dot_filename[PATH_MAX+1];
      sprintf(dot_filename, "%s/SplitBucket-%u-%u-resident.dot", g__WORK_BASE_DIRECTORY, g__PARTITION_INDEX, round);
      emit_graphviz(resident_sgraph, dot_filename);

      char dot_filename2[PATH_MAX+1];
      sprintf(dot_filename2, "%s/SplitBucket-%u-%u-work.dot", g__WORK_BASE_DIRECTORY, g__PARTITION_INDEX, round);
      emit_graphviz(private_sgraph, dot_filename2);
      }*/

    /*if( g__FULL_STATISTICS ) {
      sg_stat_components(private_sgraph, stdout);
      }*/

#ifdef VERIFY_THREADSAFE
    private_sgraph->verify(true);
#endif

    sg_remove_tips(private_sgraph, true);

#ifdef VERIFY_THREADSAFE
    private_sgraph->verify(true);
#endif

    sg_concatenate(private_sgraph, true);

#ifdef VERIFY_THREADSAFE
    private_sgraph->verify(true);
#endif

    ATOMIC_ADD(p__preRedistributionSequenceNodes,private_sgraph->node_count);

    /*
    struct lambda1 {
        static void compute_sequence_sizes(SeqNode *node) {
            if (!isNodeDead<SeqNode>(node)) {
                todo_allocated_size += node->sequence.GetAllocatedBytes();
                todo_actual_size += node->sequence.GetLengthInBytes();
            }
        }
    };
    sg_for_each(private_sgraph, lambda1::compute_sequence_sizes);
    p__preRedistributionSequenceNodeMemory += ...;
    */


    /*if (g__DOTGRAPH) {
      char dot_filename[PATH_MAX+1];
      sprintf(dot_filename, "%s/SplitBucket-%u-%u-simplify.dot", g__WORK_BASE_DIRECTORY, currentPartitionIndex, round);
      emit_graphviz(private_sgraph, dot_filename);
      }*/

    // emit sub-components that are no longer relevant to
    if (g__SLICING) { slice2_graph(private_sgraph, g__PARTITION_INDEX); }

    /*if (g__DOTGRAPH) {
      char dot_filename[PATH_MAX+1];
      sprintf(dot_filename, "%s/SplitBucket-%u-%u-worksliced.dot", g__WORK_BASE_DIRECTORY, g__PARTITION_INDEX, round);
      emit_graphviz(private_sgraph, dot_filename);
      }*/

    // emit components that are no longer relevant to the working graph
    buckets->split(private_sgraph);

    /*if (g__DOTGRAPH) {
      char dot_filename[PATH_MAX+1];
      sprintf(dot_filename, "%s/SplitBucket-%u-%u-resident-postemit.dot", g__WORK_BASE_DIRECTORY, g__PARTITION_INDEX, round);
      emit_graphviz(resident_sgraph, dot_filename);

      char dot_filename2[PATH_MAX+1];
      sprintf(dot_filename2, "%s/SplitBucket-%u-%u-work-postemit.dot", g__WORK_BASE_DIRECTORY, g__PARTITION_INDEX, round);
      emit_graphviz(private_sgraph, dot_filename2);
      }*/

    // move remaining nodes from working graph into resident graph
    private_sgraph->bulkMoveAllNodes(resident_sgraph);
    assert( private_sgraph->node_count == 0 );
}
Example #13
static int32_t _masterUpdate(struct GBSIOLockstepNode* node) {
	bool needsToWait = false;
	int i;
	switch (node->p->d.transferActive) {
	case TRANSFER_IDLE:
		// If the master hasn't initiated a transfer, it can keep going.
		node->nextEvent += LOCKSTEP_INCREMENT;
		break;
	case TRANSFER_STARTING:
		// Start the transfer, but wait for the other GBs to catch up
		node->transferFinished = false;
		needsToWait = true;
		ATOMIC_STORE(node->p->d.transferActive, TRANSFER_STARTED);
		node->nextEvent += 4;
		break;
	case TRANSFER_STARTED:
		// All the other GBs have caught up and are sleeping, we can all continue now
		node->nextEvent += 4;
		ATOMIC_STORE(node->p->d.transferActive, TRANSFER_FINISHING);
		break;
	case TRANSFER_FINISHING:
		// Finish the transfer
		// We need to make sure the other GBs catch up so they don't get behind
		node->nextEvent += node->d.p->period - 8; // Split the cycles to avoid waiting too long
#ifndef NDEBUG
		ATOMIC_ADD(node->p->d.transferId, 1);
#endif
		needsToWait = true;
		ATOMIC_STORE(node->p->d.transferActive, TRANSFER_FINISHED);
		break;
	case TRANSFER_FINISHED:
		// Everything's settled. We're done.
		_finishTransfer(node);
		ATOMIC_STORE(node->p->masterClaimed, false);
		node->nextEvent += LOCKSTEP_INCREMENT;
		ATOMIC_STORE(node->p->d.transferActive, TRANSFER_IDLE);
		break;
	}
	int mask = 0;
	for (i = 1; i < node->p->d.attached; ++i) {
		mask |= 1 << i;
	}
	if (mask) {
		if (needsToWait) {
			if (!node->p->d.wait(&node->p->d, mask)) {
				abort();
			}
		} else {
			node->p->d.signal(&node->p->d, mask);
		}
	}
	// Tell the other GBs they can continue up to where we were
	node->p->d.addCycles(&node->p->d, 0, node->eventDiff);
#ifndef NDEBUG
	node->phase = node->p->d.transferActive;
#endif
	if (needsToWait) {
		return 0;
	}
	return node->nextEvent;
}
Example #14
void
worker(CONFIG *cfg, uint32_t worker_type)
{
	WT_CONNECTION *conn;
	WT_SESSION *session;
	WT_CURSOR *cursor;
	const char *op_name = "search";
	char *data_buf, *key_buf, *value;
	int ret, op_ret;
	uint64_t next_val;

	session = NULL;
	data_buf = key_buf = NULL;
	op_ret = 0;

	conn = cfg->conn;
	key_buf = calloc(cfg->key_sz + 1, 1);
	if (key_buf == NULL) {
		lprintf(cfg, ret = ENOMEM, 0, "Populate key buffer");
		goto err;
	}
	if (worker_type == WORKER_INSERT) {
		data_buf = calloc(cfg->data_sz, 1);
		if (data_buf == NULL) {
			lprintf(cfg, ret = ENOMEM, 0, "Populate data buffer");
			goto err;
		}
		memset(data_buf, 'a', cfg->data_sz - 1);
	}

	if ((ret = conn->open_session(conn, NULL, NULL, &session)) != 0) {
		lprintf(cfg, ret, 0,
		    "open_session failed in read thread");
		goto err;
	}
	if ((ret = session->open_cursor(session, cfg->uri,
	    NULL, NULL, &cursor)) != 0) {
		lprintf(cfg, ret, 0,
		    "open_cursor failed in read thread");
		goto err;
	}

	while (g_running) {
		/* Get a value in range, avoid zero. */
#define VALUE_RANGE (cfg->icount + g_nins_ops - (cfg->insert_threads + 1))
		next_val = (worker_type == WORKER_INSERT ?
		    (cfg->icount + ATOMIC_ADD(g_nins_ops, 1)) :
		    ((uint64_t)rand() % VALUE_RANGE) + 1);
		/*
		 * If the workload is started without a populate phase we
		 * rely on at least one insert to get a valid item id.
		 */
		if (worker_type != WORKER_INSERT && VALUE_RANGE < next_val)
			continue;
		sprintf(key_buf, "%0*" PRIu64, cfg->key_sz, next_val);
		cursor->set_key(cursor, key_buf);
		switch(worker_type) {
		case WORKER_READ:
			op_name = "read";
			op_ret = cursor->search(cursor);
			if (op_ret == 0)
				++g_nread_ops;
			break;
		case WORKER_INSERT:
			op_name = "insert";
			cursor->set_value(cursor, data_buf);
			op_ret = cursor->insert(cursor);
			if (op_ret != 0)
				++g_nfailedins_ops;
			break;
		case WORKER_UPDATE:
			op_name = "update";
			op_ret = cursor->search(cursor);
			if (op_ret == 0) {
				cursor->get_value(cursor, &value);
				if (value[0] == 'a')
					value[0] = 'b';
				else
					value[0] = 'a';
				op_ret = cursor->update(cursor);
			}
			if (op_ret == 0)
				++g_nupdate_ops;
			break;
		default:
			lprintf(cfg, EINVAL, 0, "Invalid worker type");
			goto err;
		}

		/* Report errors and continue. */
		if (op_ret != 0)
			lprintf(cfg, op_ret, 0,
			    "%s failed for: %s", op_name, key_buf);
		else
			++g_nworker_ops;
	}

err:	if (ret != 0)
		++g_threads_quit;
	if (session != NULL)
		session->close(session, NULL);
	if (data_buf != NULL)
		free(data_buf);
	if (key_buf != NULL)
		free(key_buf);
}
    FIFOAllocator::Page *FIFOAllocator::get_page_(const int64_t require_size, uint64_t &id)
    {
      Page *ret = NULL;
      ThreadNode *thread_node = (ThreadNode*)pthread_getspecific(thread_node_key_);
      if (NULL == thread_node)
      {
        pthread_spin_lock(&thread_node_lock_);
        thread_node = thread_node_allocator_.alloc(sizeof(ThreadNode));
        pthread_spin_unlock(&thread_node_lock_);
        if (NULL != thread_node)
        {
          thread_node->id = UINT64_MAX;
          thread_node->next = NULL;
          int tmp_ret = pthread_setspecific(thread_node_key_, thread_node);
          if (0 != tmp_ret)
          {
            TBSYS_LOG(WARN, "setspecific fail, ret=%d key=%d thread_node=%p",
                tmp_ret, thread_node_key_, thread_node);
          }
          else
          {
            while (true)
            {
              volatile ThreadNode *ov = thread_node_list_;
              thread_node->next = ov;
              if (ov == ATOMIC_CAS(&thread_node_list_, ov, thread_node))
              {
                break;
              }
            }

            int64_t new_thread_num = ATOMIC_ADD(&thread_num_, 1);
            int64_t old_total_limit = total_limit_;
            int64_t new_total_limit = page_size_ * new_thread_num;
            if (new_total_limit > total_limit_
                && old_total_limit == ATOMIC_CAS(&total_limit_, old_total_limit, new_total_limit))
            {
              TBSYS_LOG(WARN, "total_limit cannot support at least one page for each thread, will force to modify total_limit from %ld to %ld",
                  old_total_limit, new_total_limit);
            }
          }
        }
      }
      if (NULL != thread_node)
      {
        Page *tmp_page = NULL;
        id_map_.get(thread_node->id, tmp_page);
        if (NULL != tmp_page
            && page_size_ < (tmp_page->pos + require_size))
        {
          revert_page_(thread_node->id, NULL);
          thread_node->id = UINT64_MAX;
          tmp_page = NULL;
        }
        if (NULL == tmp_page)
        {
          tmp_page = alloc_page_();
          uint64_t tmp_id = UINT64_MAX;
          if (NULL != tmp_page)
          {
            ATOMIC_ADD(&(tmp_page->ref_cnt), 1);
            if (OB_SUCCESS == id_map_.assign(tmp_page, tmp_id))
            {
              thread_node->id = tmp_id;
            }
            else
            {
              TBSYS_LOG(WARN, "assign from id_map_ fail, page=%p", tmp_page);
              free_page_(tmp_page);
              tmp_page = NULL;
            }
          }
        }
        if (NULL != tmp_page
            && page_size_ >= (tmp_page->pos + require_size))
        {
          id = thread_node->id;
          ret = tmp_page;
        }
      }
      return ret;
    }
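The thread-registration path above also shows the standard compare-and-swap push loop for a lock-free singly linked list: snapshot the head, point the new node at it, and retry until ATOMIC_CAS installs the node. A stand-alone sketch of that loop follows; node and push_node are illustrative names, and ATOMIC_CAS is assumed to behave like __sync_val_compare_and_swap (returning the previous value).

#define ATOMIC_CAS(ptr, oldv, newv) __sync_val_compare_and_swap((ptr), (oldv), (newv))

typedef struct node
{
  struct node *next;
} node;

void push_node(node **head, node *n)
{
  while (1)
  {
    node *old = *head;                 /* snapshot the current head        */
    n->next = old;                     /* link the new node in front of it */
    if (old == ATOMIC_CAS(head, old, n))
    {
      break;                           /* CAS installed the node; done     */
    }
  }
}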
Example #16
void *
slab_alloc(void *p, uint64_t size)
{
	uint64_t slab_sz = SLAB_START_SZ;
	int i;
	uint64_t div;
	void *ptr;
	struct slabentry *slab;

	if (bypass) return (malloc(size));
	ATOMIC_ADD(total_allocs, 1);
	slab = NULL;

	/* First check if we can use a dynamic slab of this size. */
	slab = try_dynamic_slab(size);

	if (!slab) {
		if (size <= ONEM) {
			/* First fifteen slots are power of 2 sizes upto 1M. */
			slab = &slabheads[find_slot(size)];
		} else {
			/* Next slots are in intervals of 1M. */
			div = size / ONEM;
			if (size % ONEM) div++;
			if (div < NUM_LINEAR) slab = &slabheads[div + NUM_POW2 - 1];
		}
	}

	if (!slab) {
		struct bufentry *buf = (struct bufentry *)malloc(sizeof (struct bufentry));
		uint32_t hindx;

		buf->ptr = malloc(size);
		buf->slab = NULL;
		hindx = hash6432shift((unsigned long)(buf->ptr)) & (HTABLE_SZ - 1);

		pthread_mutex_lock(&hbucket_locks[hindx]);
		buf->next = htable[hindx];
		htable[hindx] = buf;
		pthread_mutex_unlock(&hbucket_locks[hindx]);
		ATOMIC_ADD(oversize_allocs, 1);
		ATOMIC_ADD(hash_entries, 1);
		return (buf->ptr);
	} else {
		struct bufentry *buf;
		uint32_t hindx;

		pthread_mutex_lock(&(slab->slab_lock));
		if (slab->avail == NULL) {
			slab->allocs++;
			pthread_mutex_unlock(&(slab->slab_lock));
			buf = (struct bufentry *)malloc(sizeof (struct bufentry));
			buf->ptr = malloc(slab->sz);
			buf->slab = slab;
		} else {
			buf = slab->avail;
			slab->avail = buf->next;
			slab->hits++;
			pthread_mutex_unlock(&(slab->slab_lock));
		}

		hindx = hash6432shift((unsigned long)(buf->ptr)) & (HTABLE_SZ - 1);
		if (htable[hindx]) ATOMIC_ADD(hash_collisions, 1);
		pthread_mutex_lock(&hbucket_locks[hindx]);
		buf->next = htable[hindx];
		htable[hindx] = buf;
		pthread_mutex_unlock(&hbucket_locks[hindx]);
		ATOMIC_ADD(hash_entries, 1);
		return (buf->ptr);
	}
}