String* String::resize_capacity(STATE, Fixnum* count) {
  native_int sz = count->to_native();

  if(sz < 0) {
    Exception::argument_error(state, "negative byte array size");
  } else if(sz >= INT32_MAX) {
    // >= is used deliberately because we allocate the
    // byte array with a size of sz + 1
    Exception::argument_error(state, "too large byte array size");
  }

  CharArray* ba = CharArray::create(state, sz + 1);
  native_int copy_size = sz;
  native_int data_size = as<CharArray>(data_)->size();

  // Check that we don't copy any data outside the existing byte array
  if(unlikely(copy_size > data_size)) {
    copy_size = data_size;
  }
  memcpy(ba->raw_bytes(), byte_address(), copy_size);

  // We've unshared
  shared(state, Qfalse);
  data(state, ba);
  hash_value(state, nil<Fixnum>());

  // If we shrunk the string and num_bytes claimed more data
  // than there now is, clamp it.
  if(num_bytes()->to_native() > sz) {
    num_bytes(state, count);
  }

  return this;
}
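Before copying into the new backing store, the copy length is clamped so a resize never reads past the old buffer. A minimal standalone sketch of that clamp (the helper name is hypothetical, assuming the same semantics as above):

#include <algorithm>
#include <cstdio>

// Hypothetical standalone version of the clamp above: copy at most
// min(requested, existing) bytes so a grow never reads past the old buffer.
static long clamped_copy_size(long requested, long existing) {
    return std::min(requested, existing);
}

int main() {
    std::printf("%ld\n", clamped_copy_size(16, 8));  // 8: growing copies only what exists
    std::printf("%ld\n", clamped_copy_size(4, 8));   // 4: shrinking copies just the prefix
}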
// Allocates a bit buffer from pooled memory
// Updates size
PkPooledRawBitSetArray::buffer_type PkPooledRawBitSetArray::allocate_bit_buffer() {
    // Allocate a new buffer from our pool allocator
    byte_type* const p_buffer = (byte_type*) get_pool_alloc().malloc();
    PkAssert( NULL != p_buffer );
    // Determine if it's contiguous with our current chunk
    if ( is_contiguous_byte_buffer( p_buffer ) ) {
        // Assert that this is a pooled chunk
        PkAssert( get_pool_alloc().is_from( get_chunks().back().first ) );
        // Assert that parallel arrays are same size
        PkAssert( m_owned_chunks_mask.size() == num_chunks() );
        // Assert that we don't own this chunk
        PkAssert( !is_owned_chunk( num_chunks()-1 ) );
        // Update current contiguous chunk
        get_chunks().back().second += num_bytes();
    } else {
        // Start a new contiguous chunk
        get_chunks().push_back( PkPooledRawBitSetChunkInfo( p_buffer, num_bytes() ) );
        // This chunk is owned by the pool; therefore, we don't have to free it explicitly
        m_owned_chunks_mask.push_back( false );
        // Assert that parallel arrays are the same size
        PkAssert( m_owned_chunks_mask.size() == num_chunks() );
    }
    // Keep track of how many bit buffers are in this collection
    ++m_size;
    // Return allocated buffer
    return (buffer_type) p_buffer;
}
static void* cpy_value(void* value, size_t type) {
  // allocate
  void* cpy = malloc(num_bytes(type));
  if(!cpy) {
    fprintf(stderr, "Memory allocation failure in `cpy_value`\n");
    exit(EXIT_FAILURE);
  }
  memcpy(cpy, value, num_bytes(type));
  return cpy;
}
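A minimal usage sketch, assuming a hypothetical num_bytes(type) that maps a type tag to its size; the real mapping is not shown in the snippet above:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical type tags and size lookup; the real num_bytes(type) used by
// cpy_value above is not shown, so this mapping is an assumption.
enum { TYPE_INT = 0, TYPE_DOUBLE = 1 };

static std::size_t num_bytes(std::size_t type) {
    return type == TYPE_DOUBLE ? sizeof(double) : sizeof(int);
}

static void* cpy_value(void* value, std::size_t type) {
    void* cpy = std::malloc(num_bytes(type));
    if(!cpy) {
        std::fprintf(stderr, "Memory allocation failure in `cpy_value`\n");
        std::exit(EXIT_FAILURE);
    }
    std::memcpy(cpy, value, num_bytes(type));
    return cpy;
}

int main() {
    int x = 42;
    int* copy = (int*) cpy_value(&x, TYPE_INT);  // heap copy of x's bytes
    std::printf("%d\n", *copy);                  // prints 42
    std::free(copy);                             // caller owns the returned buffer
}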
String* String::append(STATE, const char* other, std::size_t length) {
  size_t new_size = size() + length;
  size_t capacity = data_->size();

  if(capacity < (new_size + 1)) {
    // capacity needs one extra byte of room for the trailing null
    do {
      // @todo growth should be more intelligent than doubling
      capacity *= 2;
    } while(capacity < (new_size + 1));

    // No need to call unshare and duplicate a ByteArray
    // just to throw it away.
    if(shared_ == Qtrue) shared(state, Qfalse);

    ByteArray* ba = ByteArray::create(state, capacity);
    std::memcpy(ba->bytes, data_->bytes, size());
    data(state, ba);
  } else {
    if(shared_ == Qtrue) unshare(state);
  }

  // Append on top of the null byte at the end of s1, not after it
  std::memcpy(data_->bytes + size(), other, length);

  // The 0-based index of the last character is new_size - 1
  data_->bytes[new_size] = 0;

  num_bytes(state, Integer::from(state, new_size));
  hash_value(state, (Integer*)Qnil);

  return this;
}
String* String::string_dup(STATE) {
  Module* mod = klass_;
  Class* cls = try_as_instance<Class>(mod);

  if(unlikely(!cls)) {
    while(!cls) {
      mod = mod->superclass();
      if(mod->nil_p()) rubinius::bug("Object::class_object() failed to find a class");
      cls = try_as_instance<Class>(mod);
    }
  }

  String* so = state->new_object<String>(cls);

  so->set_tainted(is_tainted_p());

  so->num_bytes(state, num_bytes());
  so->data(state, data());
  so->hash_value(state, hash_value());

  so->shared(state, Qtrue);
  shared(state, Qtrue);

  return so;
}
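The shared flags above implement copy-on-write: the duplicate points at the same data as the original, and both are marked shared so that the first mutation on either side copies the buffer. A stripped-down sketch of that pattern (names are hypothetical, not the Rubinius API; freeing is omitted for brevity):

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical minimal copy-on-write string: dup() shares the buffer and
// marks both sides shared; a writer copies first if the buffer is shared.
struct CowString {
    char*       data;
    std::size_t len;
    bool        shared;

    static CowString from(const char* s) {
        std::size_t n = std::strlen(s);
        char* buf = (char*) std::malloc(n + 1);
        std::memcpy(buf, s, n + 1);
        return CowString{buf, n, false};
    }

    CowString dup() {                 // like string_dup: no byte copy yet
        shared = true;
        return CowString{data, len, true};
    }

    void set(std::size_t i, char c) { // unshare before the first write
        if (shared) {
            char* buf = (char*) std::malloc(len + 1);
            std::memcpy(buf, data, len + 1);
            data = buf;
            shared = false;
        }
        data[i] = c;
    }
};

int main() {
    CowString a = CowString::from("hello");
    CowString b = a.dup();                   // shares a's buffer
    b.set(0, 'j');                           // triggers the copy
    std::printf("%s %s\n", a.data, b.data);  // hello jello
}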
void num_bytes(T state, Fixnum* obj) {
  num_bytes(obj);
  num_chars(nil<Fixnum>());

  if(type_specific() == eRString) {
    write_rstring(state);
  }
}
// Removes last element from pooled array
void PkPooledRawBitSetArray::pop_back() {
    // Assert we have elements to pop
    PkAssert( !empty() );
    // Assert that our byte offset indicates we have elements as well
    PkAssert( get_chunks().back().second >= num_bytes() );
    // Assert that byte offset is proper multiple of number of blocks to represent a bit set
    PkAssert( byte_offset_is_proper_multiple( get_chunks().back().second ) );
    // If last element is from pooled chunk, then release it back to pool
    if ( !is_owned_chunk( num_chunks()-1 ) ) {
        get_pool_alloc().free( get_back_bit_buffer() );
    }
    // If chunk now has zero elements, remove the chunk
    if ( 0 == ( get_chunks().back().second -= num_bytes() ) ) {
        remove_back_chunk();
    }
    // Update our size
    --m_size;
    // Assert that we are empty or new back chunk has elements
    PkAssert( empty() || (get_chunks().back().second >= num_bytes()) );
}
Fixnum* String::tr_replace(STATE, struct tr_data* tr_data) {
  if(tr_data->last + 1 > (native_int)size() || shared_->true_p()) {
    CharArray* ba = CharArray::create(state, tr_data->last + 1);

    data(state, ba);
    shared(state, Qfalse);
  }

  memcpy(byte_address(), tr_data->tr, tr_data->last);
  byte_address()[tr_data->last] = 0;

  num_bytes(state, Fixnum::from(tr_data->last));
  return Fixnum::from(tr_data->steps);
}
Fixnum* String::tr_replace(STATE, struct tr_data* tr_data) {
  if(tr_data->last > (native_int)size() || shared_->true_p()) {
    ByteArray* ba = ByteArray::create(state, tr_data->last + 1);

    data(state, ba);
    shared(state, Qfalse);
  }

  std::memcpy(data_->bytes, tr_data->tr, tr_data->last);
  data_->bytes[tr_data->last] = 0;

  num_bytes(state, Fixnum::from(tr_data->last));
  characters(state, num_bytes_);
  return Fixnum::from(tr_data->steps);
}
String* String::append(STATE, const char* other, native_int length) {
  native_int current_size = size();
  native_int data_size = as<CharArray>(data_)->size();

  // Clamp the string size to the maximum underlying byte array size
  if(unlikely(current_size > data_size)) {
    current_size = data_size;
  }

  native_int new_size = current_size + length;
  native_int capacity = data_size;

  if(capacity < new_size + 1) {
    // capacity needs one extra byte of room for the trailing null
    do {
      // @todo growth should be more intelligent than doubling
      capacity *= 2;
    } while(capacity < new_size + 1);

    // No need to call unshare and duplicate a CharArray
    // just to throw it away.
    if(shared_ == Qtrue) shared(state, Qfalse);

    CharArray* ba = CharArray::create(state, capacity);
    memcpy(ba->raw_bytes(), byte_address(), current_size);
    data(state, ba);
  } else {
    if(shared_ == Qtrue) unshare(state);
  }

  // Append on top of the null byte at the end of s1, not after it
  memcpy(byte_address() + current_size, other, length);

  // The 0-based index of the last character is new_size - 1
  byte_address()[new_size] = 0;

  num_bytes(state, Fixnum::from(new_size));
  hash_value(state, nil<Fixnum>());

  return this;
}
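The growth loop above can be read in isolation: capacity doubles until it holds the new contents plus the trailing null. A standalone sketch of that computation (hypothetical helper, not part of the String class; the original uses do/while, which behaves the same here because the loop is only entered when capacity is already too small):

#include <cstdio>

// Hypothetical standalone version of the doubling loop above: returns the
// smallest capacity of the form old_capacity * 2^k that fits new_size + 1
// (the +1 reserves room for the trailing null byte). The @todo above notes
// a smarter growth policy than doubling may be preferable.
static long grow_capacity(long capacity, long new_size) {
    while (capacity < new_size + 1) {
        capacity *= 2;
    }
    return capacity;
}

int main() {
    std::printf("%ld\n", grow_capacity(8, 8));    // 16: 8 bytes + null doesn't fit in 8
    std::printf("%ld\n", grow_capacity(16, 100)); // 128: three doublings to fit 101
}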
Object* String::secure_compare(STATE, String* other) {
  native_int s1 = num_bytes()->to_native();
  native_int s2 = other->num_bytes()->to_native();
  native_int d1 = as<CharArray>(data_)->size();
  native_int d2 = as<CharArray>(other->data_)->size();

  if(unlikely(s1 > d1)) {
    s1 = d1;
  }

  if(unlikely(s2 > d2)) {
    s2 = d2;
  }

  native_int max = (s2 > s1) ? s2 : s1;

  uint8_t* p1 = byte_address();
  uint8_t* p2 = other->byte_address();

  uint8_t* p1max = p1 + s1;
  uint8_t* p2max = p2 + s2;

  uint8_t sum = 0;

  for(native_int i = 0; i < max; i++) {
    uint8_t* c1 = p1 + i;
    uint8_t* c2 = p2 + i;

    uint8_t b1 = (c1 >= p1max) ? 0 : *c1;
    uint8_t b2 = (c2 >= p2max) ? 0 : *c2;

    sum |= (b1 ^ b2);
  }

  return (sum == 0) ? Qtrue : Qfalse;
}
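The loop deliberately avoids early exit: every byte pair is XORed and OR-accumulated, so the running time depends only on the longer length, not on where the first mismatch occurs. A minimal self-contained version of the same constant-time technique (standalone names, not the Rubinius API):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Minimal constant-time equality check in the style of secure_compare above:
// no branch depends on the byte values, only on the (public) lengths.
static bool ct_equal(const uint8_t* a, size_t alen, const uint8_t* b, size_t blen) {
    size_t max = alen > blen ? alen : blen;
    uint8_t sum = 0;
    for (size_t i = 0; i < max; i++) {
        uint8_t ba = i < alen ? a[i] : 0;  // out-of-range bytes read as 0,
        uint8_t bb = i < blen ? b[i] : 0;  // mirroring the p1max/p2max clamp
        sum |= (uint8_t)(ba ^ bb);         // any differing bit sticks in sum
    }
    return sum == 0;
}

int main() {
    const char* x = "secret-token";
    const char* y = "secret-tokex";
    std::printf("%d\n", ct_equal((const uint8_t*)x, std::strlen(x),
                                 (const uint8_t*)y, std::strlen(y)));  // 0
}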
void num_bytes(T state, Fixnum* obj) {
  num_bytes(obj);
  num_chars(nil<Fixnum>());
  update_handle(state);
}
// Returns the number of bytes this String contains
native_int byte_size() const {
  return num_bytes()->to_native();
}
fixed_bit_vector& fixed_bit_vector_manager::fill1(fixed_bit_vector& bv) const {
    memset(bv.m_data, 0xFF, num_bytes());
    return bv;
}
void fixed_bit_vector_manager::copy(fixed_bit_vector& dst, fixed_bit_vector const& src) const {
    memcpy(dst.m_data, src.m_data, num_bytes());
}
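Both helpers size their memset/memcpy with num_bytes(), which for a fixed-width bit vector is typically the bit count rounded up to whole words. A hedged sketch of that rounding (names and the 32-bit word size are assumptions; the surrounding manager class is not shown above):

#include <cstdio>

// Hypothetical sizing logic behind a num_bytes() like the one used above:
// round the bit count up to whole 32-bit words so memset/memcpy can move
// the underlying buffer in one call. The real manager may use a different
// word size.
static unsigned num_bytes(unsigned num_bits) {
    unsigned words = (num_bits + 31) / 32;  // ceil(num_bits / 32)
    return words * 4;                       // 4 bytes per 32-bit word
}

int main() {
    std::printf("%u %u %u\n", num_bytes(1), num_bytes(32), num_bytes(33));  // 4 4 8
}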