Example #1
/* Expand fmt into the output: literal runs and %s / %d / %x conversions
 * are forwarded to append_chunk(). */
static void vappend(const char *fmt, va_list ap)
{
    const char *walk,*next;

    for (walk = next = fmt; *walk; walk++)
	if (*walk == '%') {
	    if (walk != next) append_chunk(next,walk-next);
	    if (*++walk == 's') {
		const char *str;

		str = va_arg(ap,const char *);
		append_chunk(str,strlen(str));
	    }
	    else {
		char buf[21]; /* big enough for 64 bits */
		int num;

		while (isdigit(*walk) || *walk == 'l') walk++; /* @@@ FIXME */
		if (*walk != 'd' && *walk != 'x') {
		    fprintf(stderr,"bad format character %c (%d)\n",*walk,
		      *walk);
		    exit(1);
		}
		num = va_arg(ap,int);
		sprintf(buf,*walk == 'd' ? "%d" : "%x",num);
		append_chunk(buf,strlen(buf));
	    }
	    next = walk+1;
	}
    if (walk != next) append_chunk(next, walk - next); /* flush trailing literal text */
}
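
Because vappend() takes an already-started va_list, callers normally reach it through a thin varargs wrapper. Below is a minimal sketch of such a wrapper; the name append_fmt is hypothetical and not part of the original source.

#include <stdarg.h>

/* Hypothetical varargs front end for vappend() above: start the va_list,
 * hand it to the worker, and clean up. */
static void append_fmt(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vappend(fmt, ap);
    va_end(ap);
}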
Example #2
std::shared_ptr<Table> create_reference_table(std::shared_ptr<Table> referenced_table, size_t num_rows,
                                              size_t num_columns) {
  const auto num_rows_per_chunk = num_rows / GENERATED_TABLE_NUM_CHUNKS;

  TableColumnDefinitions column_definitions;
  for (size_t column_idx = 0; column_idx < num_columns; ++column_idx) {
    column_definitions.emplace_back("c" + std::to_string(column_idx), DataType::Int);
  }
  auto table = std::make_shared<Table>(column_definitions, TableType::References);

  for (size_t row_idx = 0; row_idx < num_rows;) {
    const auto num_rows_in_this_chunk = std::min(num_rows_per_chunk, num_rows - row_idx);

    Segments segments;
    for (auto column_idx = ColumnID{0}; column_idx < num_columns; ++column_idx) {
      /**
       * By specifying a chunk size of num_rows * 0.2f for the referenced table, we're emulating a referenced table
       * of (num_rows * 0.2f) * REFERENCED_TABLE_CHUNK_COUNT rows - i.e. twice as many rows as the referencing table
       * we're creating. So when creating TWO referencing tables, there should be a fair amount of overlap.
       */
      auto pos_list = generate_pos_list(num_rows * 0.2f, num_rows_per_chunk);
      segments.push_back(std::make_shared<ReferenceSegment>(referenced_table, column_idx, pos_list));
    }
    table->append_chunk(segments);

    row_idx += num_rows_in_this_chunk;
  }

  return table;
}
Example #3
std::shared_ptr<const Table> ShowColumns::_on_execute() {
  TableColumnDefinitions column_definitions;
  column_definitions.emplace_back("column_name", DataType::String);
  column_definitions.emplace_back("column_type", DataType::String);
  column_definitions.emplace_back("is_nullable", DataType::Int);
  auto out_table = std::make_shared<Table>(column_definitions, TableType::Data);

  const auto table = StorageManager::get().get_table(_table_name);
  Segments segments;

  const auto& column_names = table->column_names();
  const auto vs_names = std::make_shared<ValueSegment<pmr_string>>(
      tbb::concurrent_vector<pmr_string>(column_names.begin(), column_names.end()));
  segments.push_back(vs_names);

  const auto& column_types = table->column_data_types();

  auto column_types_as_string = tbb::concurrent_vector<pmr_string>{};
  for (const auto column_type : column_types) {
    column_types_as_string.push_back(pmr_string{data_type_to_string.left.at(column_type)});
  }

  const auto vs_types = std::make_shared<ValueSegment<pmr_string>>(std::move(column_types_as_string));
  segments.push_back(vs_types);

  const auto& column_nullables = table->columns_are_nullable();
  const auto vs_nullables = std::make_shared<ValueSegment<int32_t>>(
      tbb::concurrent_vector<int32_t>(column_nullables.begin(), column_nullables.end()));
  segments.push_back(vs_nullables);

  out_table->append_chunk(segments);

  return out_table;
}
Example #4
// Copies chunks from parameter bit set array and appends as single chunk
// Updates size
void PkPooledRawBitSetArray::push_back_chunks( const PkPooledRawBitSetArray& bitSetArray )
{
	// Allocate single contiguous chunk
	byte_type* p_chunk = (byte_type*) malloc( bitSetArray.num_total_bytes() );
	PkAssert( NULL != p_chunk );
	// Append chunk
	append_chunk( p_chunk, bitSetArray.num_total_bytes(), true /* b_owned */ );
	// Copy all bit sets
	const PkPooledRawBitSetChunkArray& other_chunks = bitSetArray.get_chunks();
	for ( size_type i=0; i<other_chunks.size(); ++i )
	{
		memcpy( p_chunk, other_chunks[ i ].first, other_chunks[ i ].second );
		p_chunk += other_chunks[ i ].second;
	}
	// Update size
	m_size += bitSetArray.size();
}
Example #5
// Constructor
// @param p_chunk - pointer to chunk of allocated memory - it will be soft copied!
// @param b_owned - true if chunk is now owned by this array (it will be freed upon this array's destruction)
// @param num_bits - the size of each bit field within the array
// @param num_elements - number of addressable bit sets
// @param num_initial_reserved_pool_elements - the number of contiguous elements to reserve - must be power of 2
PkPooledRawBitSetArray::PkPooledRawBitSetArray(
	  buffer_type p_chunk
	, const bool b_owned
	, const size_type num_bits
	, const size_type num_elements
	, const size_type num_initial_reserved_pool_elements
	) : m_num_bits( num_bits )
	  , m_num_blocks( calc_num_blocks( num_bits ) )
	  , m_size( num_elements )
	  // Initialize memory pool
	  , mp_pool_alloc( get_allocated_pool( num_bits, num_initial_reserved_pool_elements ) )
{
	// Assert allocator is non-null
	PkAssert( mp_pool_alloc );
	// Assert number of bits is positive
	PkAssert( 0 < m_num_bits );
	// Append chunk
	append_chunk( (byte_type*) p_chunk, num_total_bytes(), b_owned );
}
Example #6
static int rpc_add(rpc_ctx_t* ctx, char* fmt, ...)
{
	void** void_ptr;
	va_list ap;
	str s = {"", 0};
	struct text_chunk* l;

	va_start(ap, fmt);
	while(*fmt) {
		if (*fmt == '{' || *fmt == '[') {
			void_ptr = va_arg(ap, void**);
			l = new_chunk(&s);
			if (!l) {
				rpc_fault(ctx, 500, "Internal Server Error");
				goto err;
			}
			l->ctx=ctx;
			append_chunk(ctx, l);
			*void_ptr = l;
		} else {
			if (print_value(ctx, *fmt, &ap) < 0) goto err;
		}
		fmt++;
	}
	va_end(ap);
	return 0;
 err:
	va_end(ap);
	return -1;
}
Example #7
/* Append a data buffer to a super-chunk. */
size_t blosc2_append_buffer(blosc2_sheader* sheader, size_t typesize,
                            size_t nbytes, void* src) {
  int cbytes;
  void* chunk = malloc(nbytes + BLOSC_MAX_OVERHEAD);
  uint8_t* dec_filters = decode_filters(sheader->filters);
  int clevel = sheader->clevel;
  char* compname;
  int doshuffle, ret;

  /* Apply filters prior to compress */
  if (dec_filters[0] == BLOSC_DELTA) {
    doshuffle = dec_filters[1];
    if (sheader->filters_chunk == NULL) {
      ret = blosc2_set_delta_ref(sheader, nbytes, src);
      if (ret < 0) {
        free(chunk);        /* avoid leaking the staging buffer */
        free(dec_filters);  /* and the decoded filter list */
        return((size_t)ret);
      }
    }
  }
  else {
    doshuffle = dec_filters[0];
  }
  free(dec_filters);

  /* Compress the src buffer using super-chunk defaults */
  blosc_compcode_to_compname(sheader->compressor, &compname);
  blosc_set_compressor(compname);
  blosc_set_schunk(sheader);
  cbytes = blosc_compress(clevel, doshuffle, typesize, nbytes, src, chunk,
                          nbytes + BLOSC_MAX_OVERHEAD);
  if (cbytes < 0) {
    free(chunk);
    return cbytes;
  }

  /* Append the chunk */
  return append_chunk(sheader, chunk);
}
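
One caveat worth illustrating: blosc2_append_buffer() reports failures through its size_t return value, so callers must cast back to a signed type before testing. A minimal hypothetical caller follows; append_ints and its arguments are illustrative, not part of blosc2.

#include <stdint.h>

/* Hypothetical caller: recover the negative error code that
 * blosc2_append_buffer() funnels through a size_t. */
static int append_ints(blosc2_sheader* sheader, int32_t* data, size_t n)
{
  int64_t rc = (int64_t)blosc2_append_buffer(sheader, sizeof(int32_t),
                                             n * sizeof(int32_t), data);
  return rc < 0 ? (int)rc : 0;  /* negative means the append failed */
}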
Example #8
// Re-initializes a constructed array to the following parameters (wipes any stored bit sets)
// @param p_chunk - pointer to chunk of allocated memory - it will be soft copied!
// @param b_owned - true if chunk is now owned by this array (it will be freed upon this array's destruction)
// @param num_bits - the size of each bit field within the array
// @param num_elements - number of addressable bit sets
// @param num_initial_reserved_pool_elements - the number of contiguous elements to reserve - must be power of 2
void PkPooledRawBitSetArray::reinit(
	  buffer_type p_chunk
	, const bool b_owned
	, const size_type num_bits
	, const size_type num_elements
	, const size_type num_initial_reserved_pool_elements
	)
{
	// Wipe everything!
	clear();
	// Store number of bits
	force_set_num_bits( num_bits );
	// Assert number of bits is positive
	PkAssert( 0 < m_num_bits );
	// Determine number of blocks
	force_set_num_blocks( calc_num_blocks( num_bits ) );
	// Allocate a new memory pool
	PkVerify( mp_pool_alloc = get_allocated_pool( num_bits, num_initial_reserved_pool_elements ) );
	// Update our size
	m_size = num_elements;
	// Append chunk
	append_chunk( (byte_type*) p_chunk, num_total_bytes(), b_owned );
}
Example #9
/*
 * Called by dlmalloc_inspect_all. If used_bytes != 0 then start is
 * the start of a malloc-ed piece of memory of size used_bytes. If
 * used_bytes is 0 then start is the beginning of any free space not
 * including dlmalloc's book keeping, and end is the start of the next
 * dlmalloc chunk. Regions purely containing book keeping don't
 * trigger the callback.
 */
static void heap_chunk_callback(void* start, void* end, size_t used_bytes,
                                void* arg)
{
    u1 state;
    HeapChunkContext *ctx = (HeapChunkContext *)arg;
    UNUSED_PARAMETER(end);

    if (used_bytes == 0) {
        if (start == NULL) {
            // Reset for start of new heap.
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
        // Only process in use memory so that free region information
        // also includes dlmalloc book keeping.
        return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
     */
    bool native = ctx->type == CHUNK_TYPE("NHSG");

    if (ctx->startOfNextMemoryChunk != NULL) {
        // Transmit any pending free memory. Native free memory of
        // over kMaxFreeLen could be because of the use of mmaps, so
        // don't report. If not free memory then start a new segment.
        bool flush = true;
        if (start > ctx->startOfNextMemoryChunk) {
            const size_t kMaxFreeLen = 2 * SYSTEM_PAGE_SIZE;
            void* freeStart = ctx->startOfNextMemoryChunk;
            void* freeEnd = start;
            size_t freeLen = (char*)freeEnd - (char*)freeStart;
            if (!native || freeLen < kMaxFreeLen) {
                append_chunk(ctx, HPSG_STATE(SOLIDITY_FREE, 0),
                             freeStart, freeLen);
                flush = false;
            }
        }
        if (flush) {
            ctx->startOfNextMemoryChunk = NULL;
            flush_hpsg_chunk(ctx);
        }
    }
    const Object *obj = (const Object *)start;

    /* It's an allocated chunk.  Figure out what it is.
     */
    // TODO: if ctx.merge, see if this chunk is different from the last chunk.
    //       If it's the same, we should combine them.
    if (!native && dvmIsValidObject(obj)) {
        ClassObject *clazz = obj->clazz;
        if (clazz == NULL) {
            /* The object was probably just created
             * but hasn't been initialized yet.
             */
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        } else if (dvmIsTheClassClass(clazz)) {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
        } else if (IS_CLASS_FLAG_SET(clazz, CLASS_ISARRAY)) {
            if (IS_CLASS_FLAG_SET(clazz, CLASS_ISOBJECTARRAY)) {
                state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
            } else {
                switch (clazz->elementClass->primitiveType) {
                case PRIM_BOOLEAN:
                case PRIM_BYTE:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
                    break;
                case PRIM_CHAR:
                case PRIM_SHORT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
                    break;
                case PRIM_INT:
                case PRIM_FLOAT:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
                    break;
                case PRIM_DOUBLE:
                case PRIM_LONG:
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
                    break;
                default:
                    assert(!"Unknown GC heap object type");
                    state = HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
                    break;
                }
            }
        } else {
            state = HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
        }
    } else {
        obj = NULL; // it's not actually an object
        state = HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    append_chunk(ctx, state, start, used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD);
    ctx->startOfNextMemoryChunk =
        (char*)start + used_bytes + HEAP_SOURCE_CHUNK_OVERHEAD;
}
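
For context, a callback with this signature is driven by dlmalloc's heap inspector, as the comment above notes. The sketch below shows the typical invocation, assuming a build where dlmalloc_inspect_all() is exposed; the walk_heap wrapper is hypothetical, not Dalvik's actual code.

/* Hypothetical driver: visit every dlmalloc chunk, then flush whatever
 * the callback has accumulated for the final run. */
static void walk_heap(HeapChunkContext *ctx)
{
    dlmalloc_inspect_all(heap_chunk_callback, ctx);
    flush_hpsg_chunk(ctx);
}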
Example #10
/*
 * Convert a value to data chunk and add it to reply
 */
static int print_value(rpc_ctx_t* ctx, char fmt, va_list* ap)
{
	struct text_chunk* l;
	str str_val;
	str* sp;
	char buf[256];

	switch(fmt) {
	case 'd':
	case 't':
		str_val.s = int2str(va_arg(*ap, int), &str_val.len);
		l = new_chunk(&str_val);
		if (!l) {
			rpc_fault(ctx, 500, "Internal server error while processing"
					" line %d", ctx->line_no);
			goto err;
		}
		break;
		
	case 'f':
		str_val.s = buf;
		str_val.len = snprintf(buf, 256, "%f", va_arg(*ap, double));
		if (str_val.len < 0) {
			rpc_fault(ctx, 400, "Error While Converting double");
			ERR("Error while converting double\n");
			goto err;
		}
		l = new_chunk(&str_val);
		if (!l) {
			rpc_fault(ctx, 500, "Internal Server Error, line %d",
						ctx->line_no);
			goto err;
		}
		break;
		
	case 'b':
		str_val.len = 1;
		str_val.s = ((va_arg(*ap, int) == 0) ? "0" : "1");
		l = new_chunk(&str_val);
		if (!l) {
			rpc_fault(ctx, 500, "Internal Server Error, line %d", 
						ctx->line_no);
			goto err;
		}
		break;
				
	case 's':
		str_val.s = va_arg(*ap, char*);
		str_val.len = strlen(str_val.s);
		l = new_chunk_escape(&str_val, 0);
		if (!l) {
			rpc_fault(ctx, 500, "Internal Server Error, line %d", 
						ctx->line_no);
			goto err;
		}
		break;
		
	case 'S':
		sp = va_arg(*ap, str*);
		l = new_chunk_escape(sp, 0);
		if (!l) {
			rpc_fault(ctx, 500, "Internal Server Error, line %d", 
							ctx->line_no);
			goto err;
		}
		break;
		
	default:
		rpc_fault(ctx, 500, "Bug In SER (Invalid formatting character %c)", fmt);
		ERR("Invalid formatting character\n");
		goto err;
	}

	l->flags |= CHUNK_POSITIONAL;
	append_chunk(ctx, l);
	return 0;
 err:
	return -1;
}
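
Combined with Example #6, the switch above defines the conversion characters a caller may use: 'd'/'t' consume an int, 'f' a double, 'b' an int treated as a boolean, 's' a NUL-terminated char*, and 'S' a counted str*. A hypothetical call site follows; report() and its values are illustrative only.

/* Hypothetical call into rpc_add() from Example #6; each format
 * character consumes one matching vararg, as handled above. */
static int report(rpc_ctx_t* ctx)
{
	str host = {"localhost", 9};

	return rpc_add(ctx, "dfbsS",
	               42,          /* d: int                     */
	               3.14,        /* f: double                  */
	               1,           /* b: boolean int             */
	               "up",        /* s: zero-terminated string  */
	               &host);      /* S: counted str             */
}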