// Allocates a bit buffer from pooled memory
// Increments the element count (m_size)
PkPooledRawBitSetArray::buffer_type PkPooledRawBitSetArray::allocate_bit_buffer()
{
	// Allocate a new buffer from our pool allocator
	byte_type* const p_buffer = (byte_type*) get_pool_alloc().malloc();
	PkAssert( NULL != p_buffer );

	// Determine if it's contiguous with our current chunk
	if ( is_contiguous_byte_buffer( p_buffer ) )
	{
		// Assert that this is a pooled chunk
		PkAssert( get_pool_alloc().is_from( get_chunks().back().first ) );
		// Assert that parallel arrays are same size
		PkAssert( m_owned_chunks_mask.size() == num_chunks() );
		// Assert that we don't own this chunk
		PkAssert( !is_owned_chunk( num_chunks()-1 ) );
		// Update current contiguous chunk
		get_chunks().back().second += num_bytes();
	}
	else
	{
		// Start a new contiguous chunk
		get_chunks().push_back( PkPooledRawBitSetChunkInfo( p_buffer, num_bytes() ) );
		// This chunk is owned by the pool; therefore, we don't have to free it explicitly
		m_owned_chunks_mask.push_back( false );
		// Assert that parallel arrays are the same size
		PkAssert(  m_owned_chunks_mask.size() == num_chunks() );
	}

	// Keep track of how many bit buffers are in this collection
	++m_size;

	// Return allocated buffer
	return (buffer_type) p_buffer;
}
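The function above relies on two parallel containers: get_chunks(), holding (base pointer, byte count) pairs for each contiguous run of pooled buffers, and m_owned_chunks_mask, flagging chunks that must be freed explicitly. A minimal sketch of that bookkeeping, with illustrative names only (ChunkBookkeeping is not the actual class internals):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct ChunkBookkeeping
{
	typedef std::pair<unsigned char*, std::size_t> ChunkInfo; // (base address, bytes used)
	std::vector<ChunkInfo> chunks;   // one entry per contiguous run of pooled buffers
	std::vector<bool> owned_mask;    // true -> chunk must be freed explicitly

	// A freshly pooled buffer that is NOT contiguous with the last chunk
	// starts a new entry; the pool owns it, so owned_mask gets 'false'.
	void start_pooled_chunk(unsigned char* base, std::size_t bytes)
	{
		chunks.push_back(ChunkInfo(base, bytes));
		owned_mask.push_back(false);
		assert(chunks.size() == owned_mask.size()); // parallel arrays stay in sync
	}

	// A buffer contiguous with the last chunk just extends its byte count.
	void grow_back_chunk(std::size_t bytes)
	{
		assert(!chunks.empty());
		chunks.back().second += bytes;
	}
};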
Example #2
void align_polygon_model_data(polymodel *pm)
{
	int i, chunk_len;
	int total_correction = 0;
	ubyte *cur_old, *cur_new;
	chunk cur_ch;
	chunk ch_list[MAX_CHUNKS];
	int no_chunks = 0;
	int tmp_size = pm->model_data_size + SHIFT_SPACE;
	ubyte *tmp = d_malloc(tmp_size); // where we build the aligned version of pm->model_data

	Assert(tmp != NULL);
	//start with first chunk (is always aligned!)
	cur_old = pm->model_data;
	cur_new = tmp;
	chunk_len = get_chunks(cur_old, cur_new, ch_list, &no_chunks);
	memcpy(cur_new, cur_old, chunk_len);
	while (no_chunks > 0) {
		int first_index = get_first_chunks_index(ch_list, no_chunks);
		cur_ch = ch_list[first_index];
		// remove first chunk from array:
		no_chunks--;
		for (i = first_index; i < no_chunks; i++)
			ch_list[i] = ch_list[i + 1];
		// if (new) address unaligned:
		if ((u_int32_t)new_dest(cur_ch) % 4L != 0) {
			// calculate how much to move to be aligned
			short to_shift = 4 - (u_int32_t)new_dest(cur_ch) % 4L;
			// correct chunks' addresses
			cur_ch.correction += to_shift;
			for (i = 0; i < no_chunks; i++)
				ch_list[i].correction += to_shift;
			total_correction += to_shift;
			Assert((u_int32_t)new_dest(cur_ch) % 4L == 0);
			Assert(total_correction <= SHIFT_SPACE); // if you get this, increase SHIFT_SPACE
		}
		//write (corrected) chunk for current chunk:
		*((short *)(cur_ch.new_base + cur_ch.offset))
		  = INTEL_SHORT(cur_ch.correction
				+ INTEL_SHORT(*((short *)(cur_ch.old_base + cur_ch.offset))));
		//write (correctly aligned) chunk:
		cur_old = old_dest(cur_ch);
		cur_new = new_dest(cur_ch);
		chunk_len = get_chunks(cur_old, cur_new, ch_list, &no_chunks);
		memcpy(cur_new, cur_old, chunk_len);
		//correct submodel_ptr's for pm, too
		for (i = 0; i < MAX_SUBMODELS; i++)
			if (pm->model_data + pm->submodel_ptrs[i] >= cur_old
			    && pm->model_data + pm->submodel_ptrs[i] < cur_old + chunk_len)
				pm->submodel_ptrs[i] += (cur_new - tmp) - (cur_old - pm->model_data);
 	}
	d_free(pm->model_data);
	pm->model_data_size += total_correction;
	pm->model_data = d_malloc(pm->model_data_size);
	Assert(pm->model_data != NULL);
	memcpy(pm->model_data, tmp, pm->model_data_size);
	d_free(tmp);
}
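The only subtle arithmetic above is the shift computation: when new_dest(cur_ch) is not 4-byte aligned, to_shift = 4 - addr % 4 moves it up to the next boundary. A standalone sketch of the same rule (illustrative only, not part of the model code):

#include <cassert>
#include <cstdint>

// How many bytes an address must move forward to reach the next 4-byte
// boundary; matches to_shift = 4 - addr % 4 in the unaligned branch above.
static unsigned bytes_to_align4(std::uintptr_t addr)
{
	unsigned rem = static_cast<unsigned>(addr % 4u);
	return rem ? 4u - rem : 0u;
}

int main()
{
	assert(bytes_to_align4(0x1000) == 0);
	assert(bytes_to_align4(0x1001) == 3);
	assert(bytes_to_align4(0x1003) == 1);
	return 0;
}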
Example #3
static void align_polygon_model_data(polymodel *pm)
{
	int chunk_len;
	int total_correction = 0;
	chunk cur_ch;
	chunk ch_list[MAX_CHUNKS];
	int no_chunks = 0;
	int tmp_size = pm->model_data_size + SHIFT_SPACE;
	RAIIdmem<uint8_t[]> tmp;
	MALLOC(tmp, uint8_t[], tmp_size); // where we build the aligned version of pm->model_data

	Assert(tmp != NULL);
	//start with first chunk (is always aligned!)
	const uint8_t *cur_old = pm->model_data.get();
	auto cur_new = tmp.get();
	chunk_len = get_chunks(cur_old, cur_new, ch_list, &no_chunks);
	memcpy(cur_new, cur_old, chunk_len);
	while (no_chunks > 0) {
		int first_index = get_first_chunks_index(ch_list, no_chunks);
		cur_ch = ch_list[first_index];
		// remove first chunk from array:
		no_chunks--;
		for (int i = first_index; i < no_chunks; i++)
			ch_list[i] = ch_list[i + 1];
		// if (new) address unaligned:
		const uintptr_t u = reinterpret_cast<uintptr_t>(new_dest(cur_ch));
		if (u % 4L != 0) {
			// calculate how much to move to be aligned
			short to_shift = 4 - u % 4L;
			// correct chunks' addresses
			cur_ch.correction += to_shift;
			for (int i = 0; i < no_chunks; i++)
				ch_list[i].correction += to_shift;
			total_correction += to_shift;
			Assert(reinterpret_cast<uintptr_t>(new_dest(cur_ch)) % 4L == 0);
			Assert(total_correction <= SHIFT_SPACE); // if you get this, increase SHIFT_SPACE
		}
		//write (corrected) chunk for current chunk:
		*(reinterpret_cast<short *>(cur_ch.new_base + cur_ch.offset))
		  = INTEL_SHORT(static_cast<short>(cur_ch.correction + GET_INTEL_SHORT(cur_ch.old_base + cur_ch.offset)));
		//write (correctly aligned) chunk:
		cur_old = old_dest(cur_ch);
		cur_new = new_dest(cur_ch);
		chunk_len = get_chunks(cur_old, cur_new, ch_list, &no_chunks);
		memcpy(cur_new, cur_old, chunk_len);
		//correct submodel_ptr's for pm, too
		for (int i = 0; i < MAX_SUBMODELS; i++)
			if (&pm->model_data[pm->submodel_ptrs[i]] >= cur_old
			    && &pm->model_data[pm->submodel_ptrs[i]] < cur_old + chunk_len)
				pm->submodel_ptrs[i] += (cur_new - tmp.get()) - (cur_old - pm->model_data.get());
 	}
	pm->model_data_size += total_correction;
	pm->model_data = make_unique<ubyte[]>(pm->model_data_size);
	Assert(pm->model_data != NULL);
	memcpy(pm->model_data.get(), tmp.get(), pm->model_data_size);
}
// Deallocates all owned chunks
void PkPooledRawBitSetArray::release_owned_chunks()
{
	for ( 
		  PkBitSet::size_type itr_chunk = m_owned_chunks_mask.find_first()
		; itr_chunk != PkBitSet::npos
		; itr_chunk = m_owned_chunks_mask.find_next( itr_chunk )
		)
	{
		release_owned_chunk( itr_chunk );
		get_chunks()[ itr_chunk ].first = NULL;
		get_chunks()[ itr_chunk ].second = 0;
	}
}
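release_owned_chunks() visits only the set bits of m_owned_chunks_mask via find_first()/find_next(). Assuming PkBitSet behaves like boost::dynamic_bitset<> (an assumption, not confirmed by the example), the idiom looks like this in isolation:

#include <boost/dynamic_bitset.hpp>
#include <iostream>

int main()
{
	boost::dynamic_bitset<> mask(8);   // stands in for m_owned_chunks_mask
	mask.set(1);
	mask.set(4);
	mask.set(6);

	// Visit set bits only, exactly the find_first()/find_next() loop shape above.
	for (boost::dynamic_bitset<>::size_type i = mask.find_first();
	     i != boost::dynamic_bitset<>::npos;
	     i = mask.find_next(i))
	{
		std::cout << "owned chunk index: " << i << '\n';
	}
	return 0;
}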
Example #5
  std::vector<String> MMSeg::segment(const String& s, int depth) {
    std::vector<String> ret;
    auto start = s.begin(), end = s.end();
    while (start != end) {
      auto chunks = get_chunks(start, end, depth);
      auto best = std::max_element(chunks.begin(), chunks.end(), [&](const Chunk& x, const Chunk& y) {
        return std::tie(x.length_, x.mean_, x.var_, x.degree_) < std::tie(y.length_, y.mean_, y.var_, y.degree_);
      });

      auto& word = best->words_.front();// take the first word of the best-scoring chunk
      start += length(word);// advance past it and process the remaining string
      ret.emplace_back(String(word.first, word.second));
#ifdef DEBUG_LEVEL // print the computation steps
	  static int times;
	  if(s.begin() == word.first) times = 1;// starting a new input string
	  std::cout<<"Step: "<<times<<std::endl;
	  for(auto &item:chunks) {
		  std::cout<<item.to_string()<<std::endl;
	  }
	  std::cout<<"Best one is: "<<TransCode::to_utf8(ret.back())<<std::endl;
	  ++times;
#endif
    }// while
#ifdef DEBUG_LEVEL
	    std::cout<<"Result is: ";
#endif
    return ret;
  }
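The segmenter above selects the best chunk by lexicographic comparison of (length_, mean_, var_, degree_) through std::tie inside std::max_element. A reduced, self-contained sketch of that selection rule (the Chunk struct below is illustrative):

#include <algorithm>
#include <cassert>
#include <tuple>
#include <vector>

struct Chunk { int length_; double mean_, var_, degree_; };

int main()
{
	std::vector<Chunk> chunks = {
		{3, 1.0, 0.5, 2.0},
		{4, 0.8, 0.1, 1.0},
		{4, 0.9, 0.2, 1.5},   // wins: same length as the second entry, larger mean
	};
	// Lexicographic "best chunk" rule: longest total word length first,
	// then mean, variance and degree break ties, as in MMSeg::segment.
	auto best = std::max_element(chunks.begin(), chunks.end(),
		[](const Chunk& x, const Chunk& y) {
			return std::tie(x.length_, x.mean_, x.var_, x.degree_)
			     < std::tie(y.length_, y.mean_, y.var_, y.degree_);
		});
	assert(best == chunks.begin() + 2);
	return 0;
}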
Example #6
/*---------------------------------------------------------------------
 * get_lcrs - Execute loop to get lcrs from outbound server.
 *---------------------------------------------------------------------*/
static void get_lcrs(myctx_t *ctx)
{
  sword       err = OCI_SUCCESS;
  ub4         startcnt = 0;
  void       *lcr;
  ub1         lcrtype;
  oraub8      flag;
  ub1         flwm[OCI_LCR_MAX_POSITION_LEN];
  ub2         flwm_len = 0;
  ub1         proclwm[OCI_LCR_MAX_POSITION_LEN];
  ub2         proclwm_len = 0;
  sb4         rtncode = 0;
  oci_t      *ocip = ctx->outbound_ocip;

  printf ("\n>>> get_lcrs -- Start\n");

  ctx->lcrcnt = 0;

  while (err == OCI_SUCCESS)
  {
    startcnt = ctx->lcrcnt;
    /* if unsupported LCRs are returned, we need to ignore them and
     * continue
     */
    while ((err = OCIXStreamOutLCRReceive(ocip->svcp, ocip->errp,
                    &lcr, &lcrtype, &flag, flwm, &flwm_len, OCI_DEFAULT))
               == OCI_STILL_EXECUTING)
    {
      /* print ID Key LCRs */
      process_IDKeyLCR(ctx, lcr, lcrtype, flag);
      
      /* If LCR has chunked columns (i.e., has LOB/Long/XMLType columns) */
      if (flag & OCI_XSTREAM_MORE_ROW_DATA)
      {
        /* Get all the chunks belonging to the current LCR. */
        get_chunks(ctx);
      }
    }

    /* print lwm */
    if (flwm_len > 0) 
    {
      time_t  tm;
      time(&tm);
      printf ("\n=== Current time = %s", ctime(&tm));
      print_pos(ocip, flwm, flwm_len, (char *)"Outbound lwm");
    }
 
  }
  
  if (err)
  {
    ocierror(ocip, (char *)"get_lcrs() encounters error", FALSE);
  }

  printf (">>> get_lcrs [DONE]\n\n");
}
Example #7
void runloop(int loopid)  {

#pragma omp parallel default(none) shared(loopid, remaining_iters, hi, lo, remaining_iters_lock) 
  {
    int chunk, start_iter, end_iter, remaining_iters_tmp;
    int next_thread_id;
    int myid  = omp_get_thread_num();
    int nthreads = omp_get_num_threads(); 
    double K = (double) 1/nthreads;//k=1/p
    int ipt = (int) ceil((double)N/(double)nthreads); 
    lo[myid] = myid*ipt;
    hi[myid] = (myid+1)*ipt;
    if (hi[myid] > N) hi[myid] = N;

    remaining_iters_tmp = hi[myid]-lo[myid];
    remaining_iters[myid] = remaining_iters_tmp;

    while(remaining_iters_tmp > 0) { 
	get_chunks(myid, K, &start_iter, &chunk);
	/* Set DEBUG flag to TRUE if you want to see the flow details*/
	if(DEBUG==TRUE) print_run_details("Own", loopid, myid, myid, start_iter, chunk);
	switch(loopid){
		case 1: loop1chunk(start_iter, start_iter+chunk); break;
		case 2: loop2chunk(start_iter, start_iter+chunk); break;
	}
	remaining_iters_tmp = read_remaining_iters(myid);
    }//end while loop 1

    get_most_loaded_thread_details(nthreads, &next_thread_id, &remaining_iters_tmp);
    
    while(remaining_iters_tmp >0){
	get_chunks(next_thread_id, K, &start_iter, &chunk);
	
	/* Set DEBUG flag to TRUE if you want to see the flow details*/
	if(DEBUG==TRUE) print_run_details("Affinity", loopid, myid, next_thread_id, start_iter, chunk);
        switch(loopid){
                case 1: loop1chunk(start_iter, start_iter+chunk); break;
                case 2: loop2chunk(start_iter, start_iter+chunk); break;
        }
	get_most_loaded_thread_details(nthreads, &next_thread_id, &remaining_iters_tmp);
    }//end while loop 2
	
  }
}
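The helper get_chunks() is not shown in this example. Below is a hypothetical sketch of the chunk-size rule such a helper might implement for affinity scheduling: claim a fraction K = 1/nthreads of the target thread's remaining iterations under a per-thread lock. All names and globals here are illustrative, not the example's real code.

#include <cmath>
#include <omp.h>

#define NTHREADS_MAX 64
static int hi_[NTHREADS_MAX];            // upper iteration bound per thread
static int remaining_[NTHREADS_MAX];     // iterations still unclaimed per thread
static omp_lock_t locks_[NTHREADS_MAX];  // one lock per thread's counters (init with omp_init_lock)

// Claim ceil(K * remaining) iterations from thread 'tid'.
static void get_chunk_sketch(int tid, double K, int* start_iter, int* chunk)
{
	omp_set_lock(&locks_[tid]);
	int remaining = remaining_[tid];
	int size = (int)std::ceil(K * remaining);
	if (size > remaining)
		size = remaining;
	*start_iter = hi_[tid] - remaining;  // next unclaimed iteration index
	*chunk = size;
	remaining_[tid] = remaining - size;
	omp_unset_lock(&locks_[tid]);
}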
// Removes last element from pooled array
void PkPooledRawBitSetArray::pop_back()
{
	// Assert we have elements to pop
	PkAssert( !empty() );
	// Assert that our byte offset indicates we have elements as well
	PkAssert( get_chunks().back().second >= num_bytes() );
	// Assert that byte offset is proper multiple of number of blocks to represent a bit set
	PkAssert( byte_offset_is_proper_multiple( get_chunks().back().second ) );
	// If last element is from pooled chunk, then release it back to pool
	if ( !is_owned_chunk( num_chunks()-1 ) )
	{
		get_pool_alloc().free( get_back_bit_buffer() );
	}
	// If chunk now has zero elements, remove the chunk
	if ( 0 == ( get_chunks().back().second -= num_bytes() ) ) 
	{
		remove_back_chunk();
	}
	// Update our size
	--m_size;
	// Assert that we are empty or new back chunk has elements
	PkAssert( empty() || (get_chunks().back().second >= num_bytes()) );
}
// Releases all memory and resets state
void PkPooledRawBitSetArray::clear()
{
	release_owned_chunks();
	m_owned_chunks_mask.clear();
	get_chunks().clear();
	if ( mp_pool_alloc )
	{
		get_pool_alloc().purge_memory();
		delete mp_pool_alloc;
		mp_pool_alloc = NULL;
	}
	force_set_num_bits( 0 );
	force_set_num_blocks( 0 );
	m_size = 0;
}
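The pool calls used throughout these PkPooledRawBitSetArray examples (malloc, free, is_from, purge_memory) match the interface of boost::pool. Assuming mp_pool_alloc wraps something boost::pool-like (an assumption, not confirmed by the examples), the allocator lifecycle that clear() tears down looks like this:

#include <boost/pool/pool.hpp>
#include <cassert>

int main()
{
	boost::pool<> pool(64);        // fixed-size buffers, 64 bytes each
	void* p = pool.malloc();       // hand out one buffer
	assert(pool.is_from(p));       // the buffer came from this pool
	pool.free(p);                  // return it to the pool for reuse
	pool.purge_memory();           // release every block the pool holds, at once
	return 0;
}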
Example #10
static int write_to_file(startree_t* s, const char* fn, anbool flipped,
						 FILE* fid) {
    bl* chunks;
    il* wordsizes = NULL;
    int i;
    kdtree_fits_t* io = NULL;

	// just haven't bothered...
	assert(!(flipped && fid));

	if (fn) {
		io = kdtree_fits_open_for_writing(fn);
		if (!io) {
			ERROR("Failed to open file \"%s\" for writing kdtree", fn);
			return -1;
		}
	}
    if (flipped) {
        if (kdtree_fits_write_tree_flipped(io, s->tree, s->header)) {
            ERROR("Failed to write (flipped) kdtree to file \"%s\"", fn);
            return -1;
        }
    } else {
		if (fid) {
			if (kdtree_fits_append_tree_to(s->tree, s->header, fid)) {
				ERROR("Failed to write star kdtree");
				return -1;
			}
		} else {
			if (kdtree_fits_write_tree(io, s->tree, s->header)) {
				ERROR("Failed to write kdtree to file \"%s\"", fn);
				return -1;
			}
		}
    }

    if (flipped)
        wordsizes = il_new(4);

    chunks = get_chunks(s, wordsizes);
    for (i=0; i<bl_size(chunks); i++) {
        fitsbin_chunk_t* chunk = bl_access(chunks, i);
        if (!chunk->data)
            continue;
        if (flipped)
            kdtree_fits_write_chunk_flipped(io, chunk, il_get(wordsizes, i));
        else {
			if (fid) {
				kdtree_fits_write_chunk_to(chunk, fid);
			} else {
				kdtree_fits_write_chunk(io, chunk);
			}
		}
		fitsbin_chunk_clean(chunk);
	}
	bl_free(chunks);

    if (flipped)
        il_free(wordsizes);
    
	if (io)
		kdtree_fits_io_close(io);
    return 0;
}
Example #11
static startree_t* my_open(const char* fn, anqfits_t* fits) {
    struct timeval tv1, tv2;
	startree_t* s;
    bl* chunks;
    int i;
    kdtree_fits_t* io;
    char* treename = STARTREE_NAME;
    const char* thefn = fn;

	assert(fn || fits);

    if (!thefn)
        thefn = fits->filename;

	s = startree_alloc();
	if (!s)
		return NULL;

    gettimeofday(&tv1, NULL);
	if (fn)
		io = kdtree_fits_open(fn);
	else
		io = kdtree_fits_open_fits(fits);

    gettimeofday(&tv2, NULL);
    debug("kdtree_fits_open() took %g ms\n", millis_between(&tv1, &tv2));
	if (!io) {
        ERROR("Failed to open FITS file \"%s\"", thefn);
        goto bailout;
    }

    gettimeofday(&tv1, NULL);
    if (!kdtree_fits_contains_tree(io, treename))
        treename = NULL;
    gettimeofday(&tv2, NULL);
    debug("kdtree_fits_contains_tree() took %g ms\n", millis_between(&tv1, &tv2));

    gettimeofday(&tv1, NULL);
    s->tree = kdtree_fits_read_tree(io, treename, &s->header);
    gettimeofday(&tv2, NULL);
    debug("kdtree_fits_read_tree() took %g ms\n", millis_between(&tv1, &tv2));
    if (!s->tree) {
        ERROR("Failed to read kdtree from file \"%s\"", thefn);
        goto bailout;
    }

    // Check the tree dimensionality.
    // (because code trees can be confused...)
    if (s->tree->ndim != 3) {
        logverb("File %s contains a kd-tree with dim %i (not 3), named %s\n",
                thefn, s->tree->ndim, treename);
        s->tree->io = NULL;
        goto bailout;
    }

    gettimeofday(&tv1, NULL);
    chunks = get_chunks(s, NULL);
    for (i=0; i<bl_size(chunks); i++) {
        fitsbin_chunk_t* chunk = bl_access(chunks, i);
        void** dest = chunk->userdata;
        kdtree_fits_read_chunk(io, chunk);
        *dest = chunk->data;
    }
    bl_free(chunks);
    gettimeofday(&tv2, NULL);
    debug("reading chunks took %g ms\n", millis_between(&tv1, &tv2));

    // kdtree_fits_t is a typedef of fitsbin_t
    fitsbin_close_fd(io);

	return s;

 bailout:
    kdtree_fits_io_close(io);
    startree_close(s);
	return NULL;
}