template <typename T, typename... Args>  // template header presumably elided in the excerpt; newObj likely belongs to an arena/allocator class providing allocate()
T* newObj(Args&&... args)
 {
   return (new (allocate(sizeof(T), alignof(T))) T(std::forward<Args>(args)...));
 }
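The snippet above constructs a T with placement new in storage obtained from a custom allocate(size, alignment) routine; the matching teardown must call the destructor explicitly. Below is a minimal self-contained sketch of that pattern, with allocate written as a stand-in (not the original allocator):

#include <cstddef>
#include <new>
#include <utility>

// Stand-in for the arena's allocate(size, alignment); here it simply forwards
// to C++17 aligned operator new.
inline void* allocate(std::size_t size, std::size_t alignment) {
  return ::operator new(size, std::align_val_t{alignment});
}

template <typename T, typename... Args>
T* newObj(Args&&... args) {
  return new (allocate(sizeof(T), alignof(T))) T(std::forward<Args>(args)...);
}

// Matching teardown: explicit destructor call, then release the raw storage.
template <typename T>
void deleteObj(T* obj) {
  obj->~T();
  ::operator delete(obj, std::align_val_t{alignof(T)});
}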
Example #2
PackedPayloadHashTable::PackedPayloadHashTable(
    const std::vector<const Type *> &key_types,
    const std::size_t num_entries,
    const std::vector<AggregationHandle *> &handles,
    StorageManager *storage_manager)
    : key_types_(key_types),
      num_handles_(handles.size()),
      handles_(handles),
      total_payload_size_(ComputeTotalPayloadSize(handles)),
      storage_manager_(storage_manager),
      kBucketAlignment(alignof(std::atomic<std::size_t>)),
      kValueOffset(sizeof(std::atomic<std::size_t>) + sizeof(std::size_t)),
      key_manager_(key_types_, kValueOffset + total_payload_size_),
      bucket_size_(ComputeBucketSize(key_manager_.getFixedKeySize())) {
  std::size_t payload_offset_running_sum = sizeof(SpinMutex);
  for (const auto *handle : handles) {
    payload_offsets_.emplace_back(payload_offset_running_sum);
    payload_offset_running_sum += handle->getPayloadSize();
  }

  // NOTE(jianqiao): Potential memory leak / double freeing by copying from
  // init_payload to buckets if payload contains out of line data.
  init_payload_ =
      static_cast<std::uint8_t *>(calloc(this->total_payload_size_, 1));
  DCHECK(init_payload_ != nullptr);

  for (std::size_t i = 0; i < num_handles_; ++i) {
    handles_[i]->initPayload(init_payload_ + payload_offsets_[i]);
  }

  // Bucket size always rounds up to the alignment requirement of the atomic
  // size_t "next" pointer at the front or a ValueT, whichever is larger.
  //
  // Give base HashTable information about what key components are stored
  // inline from 'key_manager_'.
  setKeyInline(key_manager_.getKeyInline());

  // Pick out a prime number of slots and calculate storage requirements.
  std::size_t num_slots_tmp =
      get_next_prime_number(num_entries * kHashTableLoadFactor);
  std::size_t required_memory =
      sizeof(Header) + num_slots_tmp * sizeof(std::atomic<std::size_t>) +
      (num_slots_tmp / kHashTableLoadFactor) *
          (bucket_size_ + key_manager_.getEstimatedVariableKeySize());
  std::size_t num_storage_slots =
      this->storage_manager_->SlotsNeededForBytes(required_memory);
  if (num_storage_slots == 0) {
    FATAL_ERROR(
        "Storage requirement for SeparateChainingHashTable "
        "exceeds maximum allocation size.");
  }

  // Get a StorageBlob to hold the hash table.
  const block_id blob_id =
      this->storage_manager_->createBlob(num_storage_slots);
  this->blob_ = this->storage_manager_->getBlobMutable(blob_id);

  void *aligned_memory_start = this->blob_->getMemoryMutable();
  std::size_t available_memory = num_storage_slots * kSlotSizeBytes;
  if (align(alignof(Header),
            sizeof(Header),
            aligned_memory_start,
            available_memory) == nullptr) {
    // With current values from StorageConstants.hpp, this should be
    // impossible. A blob is at least 1 MB, while a Header has alignment
    // requirement of just kCacheLineBytes (64 bytes).
    FATAL_ERROR(
        "StorageBlob used to hold resizable "
        "SeparateChainingHashTable is too small to meet alignment "
        "requirements of SeparateChainingHashTable::Header.");
  } else if (aligned_memory_start != this->blob_->getMemoryMutable()) {
    // This should also be impossible, since the StorageManager allocates slots
    // aligned to kCacheLineBytes.
    DEV_WARNING("StorageBlob memory adjusted by "
                << (num_storage_slots * kSlotSizeBytes - available_memory)
                << " bytes to meet alignment requirement for "
                << "SeparateChainingHashTable::Header.");
  }

  // Locate the header.
  header_ = static_cast<Header *>(aligned_memory_start);
  aligned_memory_start =
      static_cast<char *>(aligned_memory_start) + sizeof(Header);
  available_memory -= sizeof(Header);

  // Recompute the number of slots & buckets using the actual available memory.
  // Most likely, we got some extra free bucket space due to "rounding up" to
  // the storage blob's size. It's also possible (though very unlikely) that we
  // will wind up with fewer buckets than we initially wanted because of screwy
  // alignment requirements for ValueT.
  std::size_t num_buckets_tmp =
      available_memory /
      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
       key_manager_.getEstimatedVariableKeySize());
  num_slots_tmp =
      get_previous_prime_number(num_buckets_tmp * kHashTableLoadFactor);
  num_buckets_tmp = num_slots_tmp / kHashTableLoadFactor;
  DEBUG_ASSERT(num_slots_tmp > 0);
  DEBUG_ASSERT(num_buckets_tmp > 0);

  // Locate the slot array.
  slots_ = static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
                         sizeof(std::atomic<std::size_t>) * num_slots_tmp;
  available_memory -= sizeof(std::atomic<std::size_t>) * num_slots_tmp;

  // Locate the buckets.
  buckets_ = aligned_memory_start;
  // Extra-paranoid: If ValueT has an alignment requirement greater than that
  // of std::atomic<std::size_t>, we may need to adjust the start of the bucket
  // array.
  if (align(kBucketAlignment, bucket_size_, buckets_, available_memory) ==
      nullptr) {
    FATAL_ERROR(
        "StorageBlob used to hold resizable "
        "SeparateChainingHashTable is too small to meet "
        "alignment requirements of buckets.");
  } else if (buckets_ != aligned_memory_start) {
    DEV_WARNING(
        "Bucket array start position adjusted to meet alignment "
        "requirement for SeparateChainingHashTable's value type.");
    if (num_buckets_tmp * bucket_size_ > available_memory) {
      --num_buckets_tmp;
    }
  }

  // Fill in the header.
  header_->num_slots = num_slots_tmp;
  header_->num_buckets = num_buckets_tmp;
  header_->buckets_allocated.store(0, std::memory_order_relaxed);
  header_->variable_length_bytes_allocated.store(0, std::memory_order_relaxed);
  available_memory -= bucket_size_ * (header_->num_buckets);

  // Locate variable-length key storage region, and give it all the remaining
  // bytes in the blob.
  key_manager_.setVariableLengthStorageInfo(
      static_cast<char *>(buckets_) + header_->num_buckets * bucket_size_,
      available_memory,
      &(header_->variable_length_bytes_allocated));
}
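The constructor above carves a Header, a slot array, and a bucket array out of a single raw blob, using std::align to satisfy the Header's alignment requirement. A minimal, self-contained sketch of that carving step (names here are illustrative, not Quickstep's):

#include <cstddef>
#include <memory>

struct Header { std::size_t num_slots; std::size_t num_buckets; };

// Place a Header at a properly aligned address inside a raw buffer and return
// a pointer to the remaining usable region (nullptr if it does not fit).
void* carve_header(void* buffer, std::size_t size, Header** out_header) {
  void* cursor = buffer;
  std::size_t remaining = size;
  if (std::align(alignof(Header), sizeof(Header), cursor, remaining) == nullptr) {
    return nullptr;  // buffer too small to hold an aligned Header
  }
  *out_header = static_cast<Header*>(cursor);
  // Everything after the Header is available for the slot and bucket arrays.
  return static_cast<char*>(cursor) + sizeof(Header);
}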
Example #3
// A zero-argument overload (elided in this excerpt) presumably terminates the
// recursion:  static void copy_v(void*) {}
template <typename S, typename... Rest>
static void copy_v(void* dst, const S* src, int n, Rest&&... rest) {
    SkASSERTF(((uintptr_t)dst & (alignof(S)-1)) == 0,
              "Expected %p to be aligned for at least %zu bytes.", dst, alignof(S));
    sk_careful_memcpy(dst, src, n*sizeof(S));
    copy_v(SkTAddOffset<void>(dst, n*sizeof(S)), std::forward<Rest>(rest)...);
}
Example #4
//----------------------------------------
//Test main
int main(const int argc, const char* argv[])
{
	//Compiler check test
	printf("Compiler: name=\"%s\", Ver=[%d(0x%08x).%d(0x%08x)]\n", COMPILER_NAME, COMPILER_VER, COMPILER_VER, COMPILER_MINOR, COMPILER_MINOR);
#ifdef IS_VC
	printf("    This compiler is \"Visual C++\"\n");
#endif//IS_VC
#ifdef IS_GCC
	printf("    This compiler is \"GCC\"\n");
#endif//IS_GCC

	//C++ language check test
	printf("\n");
	printf("Compiler-language: %s (C++ Ver.=%d)\n", COMPILER_LANGUAGE, CPP_VER);
#ifdef IS_CPP
	printf("    C++ is available.\n");
#endif//IS_CPP
#ifdef HAS_CPP98
	printf("        C++ is implemented C++98.\n");
#endif//HAS_CPP98
#ifdef HAS_CPP03
	printf("        C++ is implemented C++03.\n");
#endif//HAS_CPP03
#ifdef HAS_CPP11
	printf("        C++ is implemented C++11.\n");
#endif//HAS_CPP11

	//Platform check test
	printf("\n");
	printf("Platform: \"%s\"(%s %dbits, %s-endian), Ver=[%d(0x%08x).%d(0x%08x)]\n", PLATFORM_NAME, PLATFORM_ARCHITECTURE, PLATFORM_ARCHITECTURE_BITS, ENDIAN, PLATFORM_VER, PLATFORM_VER, PLATFORM_MINOR, PLATFORM_MINOR);
#ifdef IS_WIN
	printf("    Target platform is \"Windows\"\n");
#endif//IS_WIN
#ifdef IS_LINUX
	printf("    Target platform is \"Linux\"\n");
#endif//IS_LINUX
#ifdef IS_CYGWIN
	printf("    Target platform is \"Cygwin\"\n");
#endif//IS_CYGWIN

	//Predefined macro display test
	struct test
	{
		static void func()
		{
			printf("\n");
			printf("__FILE__=\"%s\"\n", __FILE__);
			printf("__LINE__=%d\n", __LINE__);
			printf("__FUNCTION__=\"%s\"\n", __FUNCTION__);
			printf("__PRETTY_FUNCTION__=\"%s\"\n", __PRETTY_FUNCTION__);
			printf("__FUNCSIG__=\"%s\"\n", __FUNCSIG__);
			printf("__func__=\"%s\"\n", __func__);
			printf("__FUNCDNAME__=\"%s\"\n", __FUNCDNAME__);
			printf("__DATE__=\"%s\"\n", __DATE__);
			printf("__TIME__=\"%s\"\n", __TIME__);
			printf("__TIMESTAMP__=\"%s\"\n", __TIMESTAMP__);
			printf("\n");
			printf("GET_FUNC_NAME()=\"%s\"\n", GET_FUNC_NAME());
			printf("GET_FILE_LINE()=\"%s\"\n", GET_FILE_LINE());
			printf("GET_FILE_LINE_TIME()=\"%s\"\n", GET_FILE_LINE_TIME());
		}
	};
	test::func();

	//noinline/always_inline test
	func_normal();
	func_inline();
	func_noinline();
	func_always_inline();

	//[C++11 feature] nullptr test
	printf("\n");
	printf("nullptr_var=%p\n", nullptr_var);
#ifdef HAS_NULLPTR
	printf("    'nullptr' is featured.\n");
#endif//HAS_NULLPTR

	//[C++11 feature] override test
	printf("\n");
	override_func_var.func();
#ifdef HAS_OVERRIDE
	printf("    'override' is featured.\n");
#endif//HAS_OVERRIDE

	//[C++11 feature] constexpr test
	printf("\n");
	printf("constexpr_var=%d\n", constexpr_var);
	printf("constexpr_calc(1, 2)=%d\n", constexpr_calc(1, 2));
#ifdef HAS_CONSTEXPR
	printf("    'constexpr' is featured.\n");
#endif//HAS_CONSTEXPR

	//[C++11 feature] user-defined literal test
#ifdef HAS_USER_DEFINED_LITERAL
	printf("\n");
	printf("user_defined_literal_var=%d\n", user_defined_literal_var);
#endif//HAS_USER_DEFINED_LITERAL
#ifdef HAS_USER_DEFINED_LITERAL
	printf("    'operator \"\"'(user defined literal) is featured.\n");
#endif//HAS_USER_DEFINED_LITERAL

	//[C++11 feature] TLS test
	printf("\n");
	printf("TLS Variable=%d\n", m_var_tls);
#ifdef HAS_THREAD_LOCAL
	printf("    'thread_local' is featured.\n");
#endif//HAS_THREAD_LOCAL

	//[C++11 feature] alignment specification/query and aligned memory allocation/free test
	printf("\n");
	printf("sizeof(data_t)=%d\n", sizeof(data_t));
	printf("alignof(data_t)=%d\n", alignof(data_t));//alignment query test
	data_t* p = reinterpret_cast<data_t*>(_aligned_malloc(sizeof(data_t), alignof(data_t)));
	printf("_aligned_malloc(sizeof(data_t), alignof(data_t))=%p\n", p);
	_aligned_free(p);
	printf("_aligned_free(p)\n");
#ifdef HAS_ALIGNAS
	printf("    'alignas' is featured.\n");
#endif//HAS_ALIGNAS
#ifdef HAS_ALIGNOF
	printf("    'alignof' is featured.\n");
#endif//HAS_ALIGNOF

	return EXIT_SUCCESS;
}
Example #5
 template <typename T>  // template header presumably elided in the excerpt
 T* allocateStruct(size_t align = alignof(T))
 {
   return (T*) allocate(sizeof(T), align);
 }
Example #6
void PackedPayloadHashTable::resize(const std::size_t extra_buckets,
                                    const std::size_t extra_variable_storage,
                                    const std::size_t retry_num) {
  // A retry should never be necessary with this implementation of HashTable.
  // Separate chaining ensures that any resized hash table with more buckets
  // than the original table will be able to hold more entries than the
  // original.
  DEBUG_ASSERT(retry_num == 0);

  SpinSharedMutexExclusiveLock<true> write_lock(this->resize_shared_mutex_);

  // Recheck whether the hash table is still full. Note that multiple threads
  // might wait to rebuild this hash table simultaneously. Only the first one
  // should do the rebuild.
  if (!isFull(extra_variable_storage)) {
    return;
  }

  // Approximately double the number of buckets and slots.
  //
  // TODO(chasseur): It may be worth it to more than double the number of
  // buckets here so that we can maintain a good, sparse fill factor for a
  // longer time as more values are inserted. Such behavior should take into
  // account kHashTableLoadFactor.
  std::size_t resized_num_slots = get_next_prime_number(
      (header_->num_buckets + extra_buckets / 2) * kHashTableLoadFactor * 2);
  std::size_t variable_storage_required =
      (resized_num_slots / kHashTableLoadFactor) *
      key_manager_.getEstimatedVariableKeySize();
  const std::size_t original_variable_storage_used =
      header_->variable_length_bytes_allocated.load(std::memory_order_relaxed);
  // If this resize was triggered by a too-large variable-length key, bump up
  // the variable-length storage requirement.
  if ((extra_variable_storage > 0) &&
      (extra_variable_storage + original_variable_storage_used >
       key_manager_.getVariableLengthKeyStorageSize())) {
    variable_storage_required += extra_variable_storage;
  }

  const std::size_t resized_memory_required =
      sizeof(Header) + resized_num_slots * sizeof(std::atomic<std::size_t>) +
      (resized_num_slots / kHashTableLoadFactor) * bucket_size_ +
      variable_storage_required;
  const std::size_t resized_storage_slots =
      this->storage_manager_->SlotsNeededForBytes(resized_memory_required);
  if (resized_storage_slots == 0) {
    FATAL_ERROR(
        "Storage requirement for resized SeparateChainingHashTable "
        "exceeds maximum allocation size.");
  }

  // Get a new StorageBlob to hold the resized hash table.
  const block_id resized_blob_id =
      this->storage_manager_->createBlob(resized_storage_slots);
  MutableBlobReference resized_blob =
      this->storage_manager_->getBlobMutable(resized_blob_id);

  // Locate data structures inside the new StorageBlob.
  void *aligned_memory_start = resized_blob->getMemoryMutable();
  std::size_t available_memory = resized_storage_slots * kSlotSizeBytes;
  if (align(alignof(Header),
            sizeof(Header),
            aligned_memory_start,
            available_memory) == nullptr) {
    // Should be impossible, as noted in constructor.
    FATAL_ERROR(
        "StorageBlob used to hold resized SeparateChainingHashTable "
        "is too small to meet alignment requirements of "
        "LinearOpenAddressingHashTable::Header.");
  } else if (aligned_memory_start != resized_blob->getMemoryMutable()) {
    // Again, should be impossible.
    DEV_WARNING("In SeparateChainingHashTable::resize(), StorageBlob "
                << "memory adjusted by "
                << (resized_num_slots * kSlotSizeBytes - available_memory)
                << " bytes to meet alignment requirement for "
                << "LinearOpenAddressingHashTable::Header.");
  }

  Header *resized_header = static_cast<Header *>(aligned_memory_start);
  aligned_memory_start =
      static_cast<char *>(aligned_memory_start) + sizeof(Header);
  available_memory -= sizeof(Header);

  // As in constructor, recompute the number of slots and buckets using the
  // actual available memory.
  std::size_t resized_num_buckets =
      (available_memory - extra_variable_storage) /
      (kHashTableLoadFactor * sizeof(std::atomic<std::size_t>) + bucket_size_ +
       key_manager_.getEstimatedVariableKeySize());
  resized_num_slots =
      get_previous_prime_number(resized_num_buckets * kHashTableLoadFactor);
  resized_num_buckets = resized_num_slots / kHashTableLoadFactor;

  // Locate slot array.
  std::atomic<std::size_t> *resized_slots =
      static_cast<std::atomic<std::size_t> *>(aligned_memory_start);
  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
                         sizeof(std::atomic<std::size_t>) * resized_num_slots;
  available_memory -= sizeof(std::atomic<std::size_t>) * resized_num_slots;

  // As in constructor, we will be extra paranoid and use align() to locate the
  // start of the array of buckets, as well.
  void *resized_buckets = aligned_memory_start;
  if (align(
          kBucketAlignment, bucket_size_, resized_buckets, available_memory) ==
      nullptr) {
    FATAL_ERROR(
        "StorageBlob used to hold resized SeparateChainingHashTable "
        "is too small to meet alignment requirements of buckets.");
  } else if (resized_buckets != aligned_memory_start) {
    DEV_WARNING(
        "Bucket array start position adjusted to meet alignment "
        "requirement for SeparateChainingHashTable's value type.");
    if (resized_num_buckets * bucket_size_ + variable_storage_required >
        available_memory) {
      --resized_num_buckets;
    }
  }
  aligned_memory_start = static_cast<char *>(aligned_memory_start) +
                         resized_num_buckets * bucket_size_;
  available_memory -= resized_num_buckets * bucket_size_;

  void *resized_variable_length_key_storage = aligned_memory_start;
  const std::size_t resized_variable_length_key_storage_size = available_memory;

  const std::size_t original_buckets_used =
      header_->buckets_allocated.load(std::memory_order_relaxed);

  // Initialize the header.
  resized_header->num_slots = resized_num_slots;
  resized_header->num_buckets = resized_num_buckets;
  resized_header->buckets_allocated.store(original_buckets_used,
                                          std::memory_order_relaxed);
  resized_header->variable_length_bytes_allocated.store(
      original_variable_storage_used, std::memory_order_relaxed);

  // Bulk-copy buckets. This is safe because:
  //     1. The "next" pointers will be adjusted when rebuilding chains below.
  //     2. The hash codes will stay the same.
  //     3. For key components:
  //       a. Inline keys will stay exactly the same.
  //       b. Offsets into variable-length storage will remain valid, because
  //          we also do a byte-for-byte copy of variable-length storage below.
  //       c. Absolute external pointers will still point to the same address.
  //       d. Relative pointers are not used with resizable hash tables.
  //     4. If values are not trivially copyable, then we invoke ValueT's copy
  //        or move constructor with placement new.
  // NOTE(harshad) - Regarding point 4 above, as this is a specialized
  // hash table implemented for aggregation, the values are trivially copyable,
  // therefore we don't need to invoke payload values' copy/move constructors.
  std::memcpy(resized_buckets, buckets_, original_buckets_used * bucket_size_);

  // Copy over variable-length key components, if any.
  if (original_variable_storage_used > 0) {
    DEBUG_ASSERT(original_variable_storage_used ==
                 key_manager_.getNextVariableLengthKeyOffset());
    DEBUG_ASSERT(original_variable_storage_used <=
                 resized_variable_length_key_storage_size);
    std::memcpy(resized_variable_length_key_storage,
                key_manager_.getVariableLengthKeyStorage(),
                original_variable_storage_used);
  }

  destroyPayload();

  // Make resized structures active.
  std::swap(this->blob_, resized_blob);
  header_ = resized_header;
  slots_ = resized_slots;
  buckets_ = resized_buckets;
  key_manager_.setVariableLengthStorageInfo(
      resized_variable_length_key_storage,
      resized_variable_length_key_storage_size,
      &(resized_header->variable_length_bytes_allocated));

  // Drop the old blob.
  const block_id old_blob_id = resized_blob->getID();
  resized_blob.release();
  this->storage_manager_->deleteBlockOrBlobFile(old_blob_id);

  // Rebuild chains.
  void *current_bucket = buckets_;
  for (std::size_t bucket_num = 0; bucket_num < original_buckets_used;
       ++bucket_num) {
    std::atomic<std::size_t> *next_ptr =
        static_cast<std::atomic<std::size_t> *>(current_bucket);
    const std::size_t hash_code = *reinterpret_cast<const std::size_t *>(
        static_cast<const char *>(current_bucket) +
        sizeof(std::atomic<std::size_t>));

    const std::size_t slot_number = hash_code % header_->num_slots;
    std::size_t slot_ptr_value = 0;
    if (slots_[slot_number].compare_exchange_strong(
            slot_ptr_value, bucket_num + 1, std::memory_order_relaxed)) {
      // This bucket is the first in the chain for this block, so reset its
      // next pointer to 0.
      next_ptr->store(0, std::memory_order_relaxed);
    } else {
      // A chain already exists starting from this slot, so put this bucket at
      // the head.
      next_ptr->store(slot_ptr_value, std::memory_order_relaxed);
      slots_[slot_number].store(bucket_num + 1, std::memory_order_relaxed);
    }
    current_bucket = static_cast<char *>(current_bucket) + bucket_size_;
  }
}
Example #7
    HistoryEntryDataEncoder& operator<<(const String& value)
    {
        // Special case the null string.
        if (value.isNull())
            return *this << std::numeric_limits<uint32_t>::max();

        uint32_t length = value.length();
        *this << length;

        *this << static_cast<uint64_t>(length * sizeof(UChar));
        encodeFixedLengthData(reinterpret_cast<const uint8_t*>(StringView(value).upconvertedCharacters().get()), length * sizeof(UChar), alignof(UChar));

        return *this;
    }
Example #8
 * You should have received a copy of the GNU Lesser
 * General Public License along with Marpa::R3.  If not, see
 * http://www.gnu.org/licenses/.
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "marpa_obs.h"
#include "marpa_util.h"
#include "avl.h"

const int minimum_alignment =
  MAX ((int) alignof (struct avl_node), alignof (struct avl_traverser));

/* Creates and returns a new table
   with comparison function |compare| using parameter |param|
   and memory allocator |allocator|.
   Returns |NULL| if memory allocation failed. */
AVL_TREE 
_marpa_avl_create (avl_comparison_func *compare, void *param,
            int requested_alignment)
{
  AVL_TREE tree;
  const int alignment = MAX(minimum_alignment, requested_alignment);
  struct obstack *avl_obstack = my_obstack_begin(0, alignment);

  assert (compare != NULL);
Example #9
SerializedBuffer SerializedBuffer::allocate_grouped_buffer(
	MemoryManager &memory_manager,
	size_type maximum_record_count,
	size_type maximum_group_count,
	size_type total_key_size,
	size_type total_value_size,
	identifier_type target_node)
{
	size_type buffer_size = 0;
	// Common header
	const ptrdiff_t common_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedBufferHeader);
	// Keys
	const ptrdiff_t keys_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedKeysHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t keys_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_key_size, alignof(size_type)); // data
	const ptrdiff_t keys_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);  // offsets
	// Values
	const ptrdiff_t values_header_ptrdiff = buffer_size;
	buffer_size += sizeof(SerializedValuesHeader);
	buffer_size  = align_ceil(buffer_size, alignof(max_align_t));
	const ptrdiff_t values_data_ptrdiff = buffer_size;
	buffer_size += align_ceil(total_value_size, alignof(size_type)); // data
	const ptrdiff_t values_offsets_ptrdiff = buffer_size;
	buffer_size += (maximum_record_count + 1) * sizeof(size_type);   // offsets
	buffer_size += (maximum_group_count + 1) * sizeof(size_type);   // group_offsets

	LockedMemoryReference locked_reference;
	if(target_node == TARGET_NODE_UNSPECIFIED){
		locked_reference = memory_manager.allocate(buffer_size).lock();
	}else{
		locked_reference =
			memory_manager.allocate(buffer_size, target_node).lock();
	}
	const auto ptr =
		reinterpret_cast<uintptr_t>(locked_reference.pointer());

	const auto common_header =
		reinterpret_cast<SerializedBufferHeader *>(
			ptr + common_header_ptrdiff);
	common_header->key_buffer_size =
		static_cast<size_type>(
			values_header_ptrdiff - keys_header_ptrdiff);
	common_header->value_buffer_size =
		static_cast<size_type>(buffer_size - values_header_ptrdiff);

	const auto keys_header =
		reinterpret_cast<SerializedKeysHeader *>(
			ptr + keys_header_ptrdiff);
	keys_header->data_buffer_size =
		static_cast<size_type>(keys_offsets_ptrdiff - keys_data_ptrdiff);
	keys_header->record_count = maximum_group_count;

	const auto values_header =
		reinterpret_cast<SerializedValuesHeader *>(
			ptr + values_header_ptrdiff);
	values_header->data_buffer_size =
		static_cast<size_type>(
			values_offsets_ptrdiff - values_data_ptrdiff);
	values_header->maximum_record_count = maximum_record_count;
	values_header->actual_record_count = 0;

	return SerializedBuffer(locked_reference);
}
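align_ceil is not defined in this excerpt; from the way it is used, it presumably rounds an offset up to the next multiple of the requested alignment, roughly as follows (an assumption, not the project's actual helper):

#include <cstddef>

// Presumed behavior of align_ceil as used above: round "value" up to the next
// multiple of "alignment" (alignment assumed to be nonzero).
inline std::size_t align_ceil(std::size_t value, std::size_t alignment) {
	return (value + alignment - 1) / alignment * alignment;
}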
Example #10
/*
 *     Remember all NFS typed partitions.
 */
void init_nfs(void)
{
        struct stat st;
        struct mntent * ent;
	FILE * mnt;

	nlist = (NFS*)0;

	if (stat("/proc/version", &st) < 0)
		return;
	if ((mnt = setmntent("/proc/mounts", "r")) == (FILE*)0)
		return;

	while ((ent = getmntent(mnt))) {
		if (isnetfs(ent->mnt_type)) {
			size_t nlen = strlen(ent->mnt_dir);
			NFS *restrict p;
			xmemalign((void*)&p, sizeof(void*), alignof(NFS)+(nlen+1));
			p->name = ((char*)p)+alignof(NFS);
			p->nlen = nlen;
			p->shadow = (SHADOW*)0;

			strcpy(p->name, ent->mnt_dir);
			if (nlist)
				nlist->prev = p;
			p->next = nlist;
			p->prev = (NFS*)0;
			nlist = p;
		}
	}
	endmntent(mnt);

	if ((mnt = setmntent("/proc/mounts", "r")) == (FILE*)0)
		return;

	while ((ent = getmntent(mnt))) {
		NFS *p;

		for (p = nlist; p; p = p->next) {
			SHADOW * restrict s;
			size_t nlen;

			if (strcmp(ent->mnt_dir, p->name) == 0)
				continue;
			if (strncmp(ent->mnt_dir, p->name, p->nlen) != 0)
				continue;

			nlen = strlen(ent->mnt_dir);
			xmemalign((void*)&s, sizeof(void*), alignof(SHADOW)+(nlen+1));
			s->name = ((char*)s)+alignof(SHADOW);
			s->nlen = nlen;

			strcpy(s->name, ent->mnt_dir);
			if (p->shadow)
			    p->shadow->prev = s;
			s->next = p->shadow;
			s->prev = (SHADOW*)0;
			p->shadow = s;
		}
	}
	endmntent(mnt);
}
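Note that this example is plain C, and alignof here is almost certainly a project-local macro rather than C11 _Alignof: it is used both as the offset of the name string and as the space reserved for the struct itself, which only makes sense if it expands to the struct's size rounded up to pointer alignment. A presumed definition, stated as an assumption rather than quoted from the source:

/* Presumed project-local macro (an assumption, not C11 _Alignof): the size of
   the type rounded up to a multiple of the pointer size, so that the name
   string can be packed immediately after the struct in one allocation. */
#define alignof(type) ((sizeof(type) + (sizeof(void*) - 1)) & ~(sizeof(void*) - 1))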
Example #11
int print_sysv7(size_t* sz, size_t* align, char* buf, struct sysv7* s) {
    *sz = sizeof(struct sysv7);
    *align = alignof(struct sysv7);
    return sprintf(buf, "%d %d %d %d %d", s->j, s->s, s->c, s->t, s->u);
}
Example #12
int print_sysv6(size_t* sz, size_t* align, char* buf, struct sysv6* s) {
    *sz = sizeof(struct sysv6);
    *align = alignof(struct sysv6);
    return sprintf(buf, "%d %d %d", s->c, s->d, s->e);
}
Example #13
int print_sysv4(size_t* sz, size_t* align, char* buf, union sysv4* s) {
    *sz = sizeof(union sysv4);
    *align = alignof(union sysv4);
    return sprintf(buf, "%d", s->s);
}
Example #14
int print_sysv3(size_t* sz, size_t* align, char* buf, struct sysv3* s) {
    *sz = sizeof(struct sysv3);
    *align = alignof(struct sysv3);
    return sprintf(buf, "%d %d", s->c, s->s);
}
Example #15
int print_sysv1(size_t* sz, size_t* align, char* buf, struct sysv1* s) {
    *sz = sizeof(struct sysv1);
    *align = alignof(struct sysv1);
    return sprintf(buf, "%d %d %d", s->j, s->k, s->m);
}
Example #16
namespace HPHP {

//////////////////////////////////////////////////////////////////////

std::aligned_storage<
  sizeof(ArrayData),
  alignof(ArrayData)
>::type s_theEmptyArray;

struct EmptyArray::Initializer {
  Initializer() {
    void* vpEmpty = &s_theEmptyArray;

    auto const ad   = static_cast<ArrayData*>(vpEmpty);
    ad->m_kind      = ArrayData::kEmptyKind;
    ad->m_size      = 0;
    ad->m_pos       = ArrayData::invalid_index;
    ad->m_count     = 0;
    ad->setStatic();
  }
};
EmptyArray::Initializer EmptyArray::s_initializer;

//////////////////////////////////////////////////////////////////////

void EmptyArray::Release(ArrayData*) {
  always_assert(!"never try to free the empty array");
}

void EmptyArray::NvGetKey(const ArrayData*, TypedValue* out, ssize_t pos) {
  // We have no valid positions---no one should call this function.
  not_reached();
}

size_t EmptyArray::Vsize(const ArrayData*) { not_reached(); }

const Variant& EmptyArray::GetValueRef(const ArrayData* ad, ssize_t pos) {
  // We have no valid positions---no one should call this function.
  not_reached();
}

// Iterators can't be advanced or rewound, because we have no valid
// iterators.
ssize_t EmptyArray::IterAdvance(const ArrayData*, ssize_t prev) {
  not_reached();
}
ssize_t EmptyArray::IterRewind(const ArrayData*, ssize_t prev) {
  not_reached();
}

// We always return false in ValidMArrayIter, so this should never be
// called.  ValidMArrayIter may be called on this array kind, though,
// because Escalate is a no-op.
bool EmptyArray::AdvanceMArrayIter(ArrayData*, MArrayIter& fp) {
  not_reached();
}

// We're always already a static array.
void EmptyArray::OnSetEvalScalar(ArrayData*) { not_reached(); }
ArrayData* EmptyArray::NonSmartCopy(const ArrayData* ad) { not_reached(); }

//////////////////////////////////////////////////////////////////////

NEVER_INLINE
ArrayData* EmptyArray::Copy(const ArrayData*) {
  auto const cap = kPackedSmallSize;
  auto const ad = static_cast<ArrayData*>(
    MM().objMallocLogged(sizeof(ArrayData) + sizeof(TypedValue) * cap)
  );
  ad->m_kindAndSize = cap;
  ad->m_posAndCount = static_cast<uint32_t>(ArrayData::invalid_index);
  assert(ad->m_kind == ArrayData::kPackedKind);
  assert(ad->m_size == 0);
  assert(ad->m_packedCap == cap);
  assert(ad->m_pos == ArrayData::invalid_index);
  assert(ad->m_count == 0);
  assert(PackedArray::checkInvariants(ad));
  return ad;
}

ArrayData* EmptyArray::CopyWithStrongIterators(const ArrayData* ad) {
  // We can never have associated strong iterators, so we don't need
  // to do anything extra.
  return Copy(ad);
}

//////////////////////////////////////////////////////////////////////

/*
 * Note: if you try to tail-call these helper routines, gcc will
 * unfortunately still generate functions with frames and make a
 * call instead of a jump.  It's because of std::pair (and is still
 * the case if you return a custom struct).
 *
 * For now we're leaving this, because it's essentially free for these
 * routines to leave the lval pointer in the second return register,
 * and it seems questionable to clone the whole function just to avoid
 * the frame creation in these callers.  (It works to reinterpret_cast
 * these functions to one that returns ArrayData* instead of a pair in
 * the cases we don't need the second value, but this seems a tad too
 * sketchy for probably-unmeasurable benefits.  I'll admit I didn't
 * try to measure it though... ;)
 */

/*
 * Helper for empty array -> packed transitions.  Creates an array
 * with one element.  The element is transferred into the array (should
 * already be incref'd).
 */
ALWAYS_INLINE
std::pair<ArrayData*,TypedValue*> EmptyArray::MakePackedInl(TypedValue tv) {
  auto const cap = kPackedSmallSize;
  auto const ad = static_cast<ArrayData*>(
    MM().objMallocLogged(sizeof(ArrayData) + cap * sizeof(TypedValue))
  );
  ad->m_kindAndSize = uint64_t{1} << 32 | cap; // also set kind
  ad->m_posAndCount = 0;

  auto& lval = *reinterpret_cast<TypedValue*>(ad + 1);
  lval.m_data = tv.m_data;
  lval.m_type = tv.m_type;

  assert(ad->m_kind == ArrayData::kPackedKind);
  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->m_count == 0);
  assert(ad->m_packedCap == cap);
  assert(PackedArray::checkInvariants(ad));
  return { ad, &lval };
}

NEVER_INLINE
std::pair<ArrayData*,TypedValue*> EmptyArray::MakePacked(TypedValue tv) {
  return MakePackedInl(tv);
}

/*
 * Helper for creating a single-element mixed array with a string key.
 *
 * Note: the key is not already incref'd, but the value must be.
 */
NEVER_INLINE
std::pair<ArrayData*,TypedValue*>
EmptyArray::MakeMixed(StringData* key, TypedValue val) {
  auto const mask = MixedArray::SmallMask;            // 3
  auto const cap  = MixedArray::computeMaxElms(mask); // 3
  auto const ad   = smartAllocArray(cap, mask);

  ad->m_kindAndSize = uint64_t{1} << 32 | ArrayData::kMixedKind << 24;
  ad->m_posAndCount = 0;
  ad->m_capAndUsed  = uint64_t{1} << 32 | cap;
  ad->m_tableMask   = mask;
  ad->m_nextKI      = 0;
  ad->m_hLoad       = 1;

  auto const data = reinterpret_cast<MixedArray::Elm*>(ad + 1);
  auto const hash = reinterpret_cast<int32_t*>(data + cap);

  assert(mask + 1 == 4);
  auto const emptyVal = int64_t{MixedArray::Empty};
  reinterpret_cast<int64_t*>(hash)[0] = emptyVal;
  reinterpret_cast<int64_t*>(hash)[1] = emptyVal;

  auto const khash = key->hash();
  hash[khash & mask] = 0;
  data[0].setStrKey(key, khash);

  auto& lval  = data[0].data;
  lval.m_data = val.m_data;
  lval.m_type = val.m_type;

  assert(ad->m_kind == ArrayData::kMixedKind);
  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->m_count == 0);
  assert(ad->m_cap == cap);
  assert(ad->m_used == 1);
  assert(ad->checkInvariants());
  return { ad, &lval };
}

/*
 * Creating a single-element mixed array with an integer key.  The
 * value is already incref'd.
 */
std::pair<ArrayData*,TypedValue*>
EmptyArray::MakeMixed(int64_t key, TypedValue val) {
  auto const mask = MixedArray::SmallMask;            // 3
  auto const cap  = MixedArray::computeMaxElms(mask); // 3
  auto const ad   = smartAllocArray(cap, mask);

  ad->m_kindAndSize = uint64_t{1} << 32 | ArrayData::kMixedKind << 24;
  ad->m_posAndCount = 0;
  ad->m_capAndUsed  = uint64_t{1} << 32 | cap;
  ad->m_tableMask   = mask;
  ad->m_nextKI      = key + 1;
  ad->m_hLoad       = 1;

  auto const data = reinterpret_cast<MixedArray::Elm*>(ad + 1);
  auto const hash = reinterpret_cast<int32_t*>(data + cap);

  assert(mask + 1 == 4);
  auto const emptyVal = int64_t{MixedArray::Empty};
  reinterpret_cast<int64_t*>(hash)[0] = emptyVal;
  reinterpret_cast<int64_t*>(hash)[1] = emptyVal;

  hash[key & mask] = 0;
  data[0].setIntKey(key);

  auto& lval  = data[0].data;
  lval.m_data = val.m_data;
  lval.m_type = val.m_type;

  assert(ad->m_kind == ArrayData::kMixedKind);
  assert(ad->m_size == 1);
  assert(ad->m_pos == 0);
  assert(ad->m_count == 0);
  assert(ad->m_cap == cap);
  assert(ad->m_used == 1);
  assert(ad->checkInvariants());
  return { ad, &lval };
}

//////////////////////////////////////////////////////////////////////

ArrayData* EmptyArray::SetInt(ArrayData*, int64_t k, Cell c, bool) {
  // TODO(#3888164): we should make it so we don't need KindOfUninit checks
  if (c.m_type == KindOfUninit) c.m_type = KindOfNull;
  tvRefcountedIncRef(&c);
  auto const ret = k == 0 ? EmptyArray::MakePacked(c)
                          : EmptyArray::MakeMixed(k, c);
  return ret.first;
}

ArrayData* EmptyArray::SetStr(ArrayData*,
                              StringData* k,
                              Cell val,
                              bool copy) {
  tvRefcountedIncRef(&val);
  // TODO(#3888164): we should make it so we don't need KindOfUninit checks
  if (val.m_type == KindOfUninit) val.m_type = KindOfNull;
  return EmptyArray::MakeMixed(k, val).first;
}

ArrayData* EmptyArray::LvalInt(ArrayData*, int64_t k, Variant*& retVar, bool) {
  auto const ret = k == 0 ? EmptyArray::MakePacked(make_tv<KindOfNull>())
                          : EmptyArray::MakeMixed(k, make_tv<KindOfNull>());
  retVar = &tvAsVariant(ret.second);
  return ret.first;
}

ArrayData* EmptyArray::LvalStr(ArrayData*,
                               StringData* k,
                               Variant*& retVar,
                               bool) {
  auto const ret = EmptyArray::MakeMixed(k, make_tv<KindOfNull>());
  retVar = &tvAsVariant(ret.second);
  return ret.first;
}

ArrayData* EmptyArray::LvalNew(ArrayData*, Variant*& retVar, bool) {
  auto const ret = EmptyArray::MakePacked(make_tv<KindOfNull>());
  retVar = &tvAsVariant(ret.second);
  return ret.first;
}

ArrayData* EmptyArray::SetRefInt(ArrayData*,
                                 int64_t k,
                                 Variant& var,
                                 bool) {
  auto ref = *var.asRef();
  tvIncRef(&ref);
  auto const ret = k == 0 ? EmptyArray::MakePacked(ref)
                          : EmptyArray::MakeMixed(k, ref);
  return ret.first;
}

ArrayData* EmptyArray::SetRefStr(ArrayData*,
                                 StringData* k,
                                 Variant& var,
                                 bool) {
  auto ref = *var.asRef();
  tvIncRef(&ref);
  return EmptyArray::MakeMixed(k, ref).first;
}

ArrayData* EmptyArray::Append(ArrayData*, const Variant& vin, bool copy) {
  auto cell = *vin.asCell();
  tvRefcountedIncRef(&cell);
  // TODO(#3888164): we should make it so we don't need KindOfUninit checks
  if (cell.m_type == KindOfUninit) cell.m_type = KindOfNull;
  return EmptyArray::MakePackedInl(cell).first;
}

ArrayData* EmptyArray::AppendRef(ArrayData*, Variant& v, bool copy) {
  auto ref = *v.asRef();
  tvIncRef(&ref);
  return EmptyArray::MakePacked(ref).first;
}

ArrayData* EmptyArray::AppendWithRef(ArrayData*, const Variant& v, bool copy) {
  auto tv = make_tv<KindOfNull>();
  tvAsVariant(&tv).setWithRef(v);
  return EmptyArray::MakePacked(tv).first;
}

//////////////////////////////////////////////////////////////////////

ArrayData* EmptyArray::PlusEq(ArrayData*, const ArrayData* elems) {
  elems->incRefCount();
  return const_cast<ArrayData*>(elems);
}

ArrayData* EmptyArray::Merge(ArrayData*, const ArrayData* elems) {
  // Packed arrays don't need renumbering, so don't make a copy.
  if (elems->isPacked()) {
    elems->incRefCount();
    return const_cast<ArrayData*>(elems);
  }
  // Fast path the common case that elems is mixed.
  if (elems->isMixed()) {
    auto const copy = MixedArray::Copy(elems);
    copy->incRefCount();
    MixedArray::Renumber(copy);
    return copy;
  }
  auto copy = elems->copy();
  copy->incRefCount();
  copy->renumber();
  return copy;
}

ArrayData* EmptyArray::PopOrDequeue(ArrayData* ad, Variant& value) {
  value = uninit_null();
  return ad;
}

ArrayData* EmptyArray::Prepend(ArrayData*, const Variant& vin, bool) {
  auto cell = *vin.asCell();
  tvRefcountedIncRef(&cell);
  // TODO(#3888164): we should make it so we don't need KindOfUninit checks
  if (cell.m_type == KindOfUninit) cell.m_type = KindOfNull;
  return EmptyArray::MakePacked(cell).first;
}

//////////////////////////////////////////////////////////////////////

ArrayData* EmptyArray::ZSetInt(ArrayData* ad, int64_t k, RefData* v) {
  auto const arr = MixedArray::MakeReserveMixed(MixedArray::SmallSize);
  arr->m_count = 0;
  DEBUG_ONLY auto const tmp = arr->zSet(k, v);
  assert(tmp == arr);
  return arr;
}

ArrayData* EmptyArray::ZSetStr(ArrayData* ad, StringData* k, RefData* v) {
  auto const arr = MixedArray::MakeReserveMixed(MixedArray::SmallSize);
  arr->m_count = 0;
  DEBUG_ONLY auto const tmp = arr->zSet(k, v);
  assert(tmp == arr);
  return arr;
}

ArrayData* EmptyArray::ZAppend(ArrayData* ad, RefData* v) {
  auto const arr = MixedArray::MakeReserveMixed(MixedArray::SmallSize);
  arr->m_count = 0;
  DEBUG_ONLY auto const tmp = arr->zAppend(v);
  assert(tmp == arr);
  return arr;
}

//////////////////////////////////////////////////////////////////////

}
Example #17
        return allocFlatSmallImpl(len);
    }
    return allocFlatSlowImpl(len);
}

NEVER_INLINE StringData* allocFlatForLenSlow(size_t len) {
    return allocFlatSlowImpl(len);
}

}

//////////////////////////////////////////////////////////////////////

std::aligned_storage<
  sizeof(StringData) + 1,
  alignof(StringData)
>::type s_theEmptyString;

//////////////////////////////////////////////////////////////////////

// Create either a static or an uncounted string.
// The difference between static and uncounted is the lifetime
// of the string. Static strings are alive for the lifetime of the process.
// Uncounted strings are not refcounted but will be deleted at some point.
ALWAYS_INLINE
StringData* StringData::MakeShared(folly::StringPiece sl, bool trueStatic) {
    if (UNLIKELY(sl.size() > StringData::MaxSize)) {
        throw_string_too_large(sl.size());
    }

    auto const cc = CapCode::ceil(sl.size());
Example #18
SerializedBuffer::SerializedBuffer(LockedMemoryReference mobj)
	: m_memory_object(std::move(mobj))
	, m_common_header(nullptr)
	, m_keys_header(nullptr)
	, m_values_header(nullptr)
	, m_keys_data(nullptr)
	, m_keys_offsets(nullptr)
	, m_values_data(nullptr)
	, m_values_offsets(nullptr)
	, m_values_key_lengths(nullptr)
	, m_values_group_offsets(nullptr)
{
	if(!m_memory_object){ return; }
	const auto base_ptr =
		reinterpret_cast<uintptr_t>(m_memory_object.pointer());
	ptrdiff_t cur_offset = 0;

	m_common_header =
		reinterpret_cast<SerializedBufferHeader *>(base_ptr + cur_offset);
	cur_offset += sizeof(SerializedBufferHeader);
	const auto tail_offset = static_cast<ptrdiff_t>(
		sizeof(SerializedBufferHeader) +
		m_common_header->key_buffer_size +
		m_common_header->value_buffer_size);

	if(m_common_header->key_buffer_size > 0){
		m_keys_header =
			reinterpret_cast<SerializedKeysHeader *>(base_ptr + cur_offset);
		cur_offset += sizeof(SerializedKeysHeader);
		cur_offset  = align_ceil(cur_offset, alignof(max_align_t));
		m_keys_data = reinterpret_cast<void *>(base_ptr + cur_offset);
		cur_offset += m_keys_header->data_buffer_size;
		m_keys_offsets = reinterpret_cast<size_type *>(base_ptr + cur_offset);
		cur_offset +=
			sizeof(size_type) * (m_keys_header->record_count + 1);
	}

	if(m_common_header->value_buffer_size > 0){
		m_values_header =
			reinterpret_cast<SerializedValuesHeader *>(base_ptr + cur_offset);
		cur_offset += sizeof(SerializedValuesHeader);
		cur_offset  = align_ceil(cur_offset, alignof(max_align_t));
		m_values_data = reinterpret_cast<void *>(base_ptr + cur_offset);
		cur_offset += m_values_header->data_buffer_size;
		m_values_offsets =
			reinterpret_cast<size_type *>(base_ptr + cur_offset);
		cur_offset +=
			sizeof(size_type) * (m_values_header->maximum_record_count + 1);
		if(cur_offset == tail_offset){
			// value-only
		}else if(m_common_header->key_buffer_size == 0){
			// key-value
			m_values_key_lengths =
				reinterpret_cast<size_type *>(base_ptr + cur_offset);
			cur_offset +=
				sizeof(size_type) * m_values_header->maximum_record_count;
		}else{
			// grouped
			m_values_group_offsets =
				reinterpret_cast<size_type *>(base_ptr + cur_offset);
			cur_offset +=
				sizeof(size_type) * (m_keys_header->record_count + 1);
		}
	}
}
Example #19
      using type = typename substitute< _Handle_ >::type;
    };

  public: /*data*/
    mutable int uv_error = 0;
    ref_count refs;
    type_storage< on_destroy_t > destroy_cb_storage;
    aligned_storage< MAX_PROPERTY_SIZE, MAX_PROPERTY_ALIGN > property_storage;
#ifndef HACK_UV_INTERFACE_PTR
    handle::uv_interface *uv_interface_ptr = nullptr;
#else
    typename _Handle_::uv_interface *uv_interface_ptr = nullptr;
#endif
    //* all the fields placed before should have immutable layout size across the handle class hierarchy *//
    alignas(static_cast< const int >(
        greatest(alignof(::uv_any_handle), alignof(::uv_fs_t))
    )) typename uv_t::type uv_handle_struct = { 0,};

  private: /*constructors*/
    instance()
    {
      property_storage.reset< typename _Handle_::properties >();
      uv_interface_ptr = &_Handle_::uv_interface::instance();
    }
    template< typename... _Args_ > instance(_Args_&&... _args)
    {
      property_storage.reset< typename _Handle_::properties >(std::forward< _Args_ >(_args)...);
      uv_interface_ptr = &_Handle_::uv_interface::instance();
    }

  public: /* constructors*/
Example #20
void test_max_align_t()
{
    std::cout << alignof(std::max_align_t) << '\n';
}
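For context: std::max_align_t is defined so that its alignment is at least as strict as that of every scalar type, so the value printed above is the implementation's largest fundamental alignment; anything stricter is an "extended" alignment. A small illustrative check (not part of the original example):

#include <cstddef>

// long double is a scalar type, so max_align_t must be at least as strictly aligned.
static_assert(alignof(std::max_align_t) >= alignof(long double),
              "max_align_t is at least as strictly aligned as any scalar type");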
Example #21
static constexpr inline
span_size_t span_align_of(void)
noexcept {
	return span_size(alignof(T));
}
Example #22
#include <iostream>

namespace sdl {

#if GRAEHL_CPP14_TYPETRAITS

using std::aligned_storage_t;
using std::aligned_union_t;
using std::enable_if_t;
using std::conditional_t;
using std::common_type_t;

#define SDL_TYPETRAITS_T(traitname) using std::traitname##_t;

#else
template <std::size_t Len, std::size_t Align = alignof(std::max_align_t)> /*default-alignment*/
using aligned_storage_t = typename std::aligned_storage<Len, Align>::type;

template <std::size_t Len, class... Types>
using aligned_union_t = typename std::aligned_union<Len, Types...>::type;

template <bool B, class T = void>
using enable_if_t = typename std::enable_if<B, T>::type;

template <bool b, class T, class F>
using conditional_t = typename std::conditional<b, T, F>::type;

template <class... T>
using common_type_t = typename std::common_type<T...>::type;

#define SDL_TYPETRAITS_T(traitname) \
Example #23
 Subsystem* Subsystem::Instance()
 {
         if(mInstance == NULL)
         {
             void* memory = Support::Globals::Instance()->Allocator->Allocate(sizeof(Subsystem), alignof(Subsystem));
             mInstance = new(memory) Subsystem();
         }
         return mInstance;
 }
Example #24
namespace stink
{
  /**
   * A memory pool providing blocks of memory of a single SIZE and ALIGNMENT.
   *
   * New blocks are allocated from the system in chunks of CHUNK_SIZE.
   */
  template<std::size_t SIZE, std::size_t ALIGNMENT = alignof(::max_align_t), std::size_t CHUNK_SIZE = 1 << 8,
      typename Storage_ = boost::aligned_storage<
          boost::mpl::max<typename boost::mpl::size_t<SIZE>,
              typename boost::mpl::sizeof_<typename boost::intrusive::slist_base_hook<> > >::type::value,
          boost::mpl::max<typename boost::mpl::size_t<ALIGNMENT>,
              typename boost::alignment_of<typename boost::intrusive::slist_base_hook<> > >::type::value> >
    class FixedSizePool : private boost::noncopyable
    {
      typedef boost::intrusive::slist_base_hook<> FreeListNode;

      typedef Storage_ Storage;
      typedef boost::intrusive::slist<FreeListNode> FreeList;
      typedef boost::array<Storage, CHUNK_SIZE> Chunk;
      typedef boost::ptr_list<Chunk> Chunks;

      std::size_t mallocCount;
      std::size_t freeCount;

      Chunks chunks;
      FreeList freeList;

      void
      allocateChunk ()
      {
        chunks.push_back (new Chunk); // requires c++17 new with alignment

        for (Storage& storage : chunks.back ())
          {
            FreeListNode *node = reinterpret_cast<FreeListNode*> (&storage);
            new (node) FreeListNode;
            freeList.push_front (*node);
          }
      }

    public:
      FixedSizePool () :
          mallocCount (0), freeCount (0), chunks (), freeList ()
      {
        allocateChunk ();
      }

      ~FixedSizePool ()
      {
        assert(mallocCount == freeCount);
      }

      void*
      malloc ()
      {
        if (freeList.empty ())
          {
            allocateChunk ();
          }

        FreeListNode *node = &freeList.front ();
        freeList.pop_front ();
        node->~slist_base_hook<> ();
        ++mallocCount;
        return reinterpret_cast<void*> (node);
      }

      void
      free (void* t)
      {
        FreeListNode *node = reinterpret_cast<FreeListNode*> (t);
        new (node) FreeListNode;
        freeList.push_front (*node);
        ++freeCount;
        assert(([this]()->bool
          { // check we have no double-frees
            std::set<FreeListNode*> s;
            for(auto &n: freeList) s.insert(&n);
            return s.size() == freeList.size();
          }) ());
      }
    };
}
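A minimal usage sketch of the pool above (illustrative only; it assumes the header defining stink::FixedSizePool and its boost dependencies are included). The pool hands out raw SIZE-byte, ALIGNMENT-aligned blocks from a free list, and the caller typically constructs into them with placement new:

// Hypothetical usage: a pool of 64-byte, default-aligned blocks.
void pool_usage_example() {
  stink::FixedSizePool<64> pool;
  void* block = pool.malloc();  // pops a block off the free list (grows by a chunk if empty)
  // ... placement-new an object of size <= 64 into "block", use it, destroy it ...
  pool.free(block);             // pushes the block back onto the free list
}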
Example #25
File: align.C Project: 0day-ci/gcc
int
main ()
{
  if (sizeof  (char)                    !=  1)
    return 1;
  if (alignof (char)                    !=  1)
    return 2;
  if (sizeof  (signed char)             !=  1)
    return 3;
  if (alignof (signed char)             !=  1)
    return 4;
  if (sizeof  (unsigned char)           !=  1)
    return 5;  
  if (alignof (unsigned char)           !=  1)
    return 6;
  if (sizeof  (short)                   !=  2)
    return 7;
  if (alignof (short)                   !=  2)
    return 8;
  if (sizeof  (signed short)            !=  2)
    return 9;
  if (alignof (signed short)            !=  2)
    return 10;
  if (sizeof  (unsigned short)          !=  2)
    return 11;
  if (alignof (unsigned short)          !=  2)
    return 12;
  if (sizeof  (int)                     !=  4)
    return 13;
  if (alignof (int)                     !=  4)
    return 14;
  if (sizeof  (signed int)              !=  4)
    return 15;
  if (alignof (signed int)              !=  4)
    return 16;
  if (sizeof  (unsigned int)            !=  4)
    return 17;
  if (alignof (unsigned int)            !=  4)
    return 18;
  if (sizeof  (enum A)                  !=  4)
    return 19;
  if (alignof (enum A)                  !=  4)
    return 20;
#ifdef HAVE_IA64_TYPES
  if (sizeof  (__int64)                 !=  8)
    return 21;
  if (alignof (__int64)                 !=  8)
    return 22;
  if (sizeof  (signed __int64)          !=  8)
    return 23;
  if (alignof (signed __int64)          !=  8)
    return 24;
  if (sizeof  (unsigned __int64)        !=  8)
    return 25;
  if (alignof (unsigned __int64)        !=  8)
    return 26;
  if (sizeof  (__int128)                != 16)
    return 27;
  if (alignof (__int128)                != 16)
    return 28;
  if (sizeof  (signed __int128)         != 16)
    return 29;
  if (alignof (signed __int128)         != 16)
    return 30;
  if (sizeof  (unsigned __int128)       != 16)
    return 31;
  if (alignof (unsigned __int128)       != 16)
    return 32;
#endif  /* HAVE_IA64_TYPES  */
  if (sizeof  (void *)                  !=  4)
    return 33;
  if (alignof (void *)                  !=  4)
    return 34;
  if (sizeof  (void (*) ())             !=  4)
    return 35;
  if (alignof (void (*) ())             !=  4)
    return 36;
  if (sizeof  (float)                   !=  4)
    return 37;
  if (alignof (float)                   !=  4)
    return 38;
  if (sizeof  (double)                  !=  8)
    return 39;
  if (alignof (double)                  !=  8)
    return 40;
#ifdef HAVE_IA64_TYPES
  if (sizeof  (__float80)               != 16)
    return 41;
  if (alignof (__float80)               != 16)
    return 42;
  if (sizeof  (__float128)              != 16)
    return 43;
  if (alignof (__float128)              != 16)
    return 44;
#endif  /* HAVE_IA64_TYPES  */

  return 0;
}
Example #26
}

static inline __always_inline bool __state_have_pending_writers(int state) {
  return state & STATE_HAVE_PENDING_WRITERS_FLAG;
}

static inline __always_inline bool __state_have_pending_readers_or_writers(int state) {
  return state & STATE_HAVE_PENDING_READERS_OR_WRITERS_FLAG;
}

static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
              "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");

// For binary compatibility with the old version of pthread_rwlock_t, we can't use more strict
// alignment than 4-byte alignment.
static_assert(alignof(pthread_rwlock_t) == 4,
             "pthread_rwlock_t should fulfill the alignment requirement of pthread_rwlock_internal_t.");

static inline __always_inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  memset(rwlock, 0, sizeof(pthread_rwlock_internal_t));

  if (__predict_false(attr != NULL)) {
    rwlock->pshared = __rwlockattr_getpshared(attr);
    int kind = __rwlockattr_getkind(attr);
    switch (kind) {
Example #27
 void deallocate(T* p, std::size_t n)
 {
     void* vp = static_cast<void*>(p);
     P->countDealloc(vp, n*sizeof(T), alignof(T));
     ::operator delete(vp);
 }
Example #28
static inline bool decodeStringText(ArgumentDecoder& decoder, uint32_t length, String& result)
{
    // Before allocating the string, make sure that the decoder buffer is big enough.
    if (!decoder.bufferIsLargeEnoughToContain<CharacterType>(length)) {
        decoder.markInvalid();
        return false;
    }
    
    CharacterType* buffer;
    String string = String::createUninitialized(length, buffer);
    if (!decoder.decodeFixedLengthData(reinterpret_cast<uint8_t*>(buffer), length * sizeof(CharacterType), alignof(CharacterType)))
        return false;
    
    result = string;
    return true;    
}
Example #29
void ArgumentCoder<String>::encode(ArgumentEncoder& encoder, const String& string)
{
    // Special case the null string.
    if (string.isNull()) {
        encoder << std::numeric_limits<uint32_t>::max();
        return;
    }

    uint32_t length = string.length();
    bool is8Bit = string.is8Bit();

    encoder << length << is8Bit;

    if (is8Bit)
        encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(string.characters8()), length * sizeof(LChar), alignof(LChar));
    else
        encoder.encodeFixedLengthData(reinterpret_cast<const uint8_t*>(string.characters16()), length * sizeof(UChar), alignof(UChar));
}
Example #30
int print_date2(size_t* sz, size_t* align, char* buf, struct Date2* d) {
    *sz = sizeof(struct Date2);
    *align = alignof(struct Date2);
    return sprintf(buf, "%d %d %d %d", d->nWeekDay, d->nMonthDay, d->nMonth, d->nYear);
}