#include <stdio.h>
#include <conio.h>  /* for getch(); non-standard, Windows/DOS compilers */

/* AlignRight() and MergeArray() are not shown before main(); prototypes are
   inferred from the call sites below (see the hypothetical sketches after
   main()). AlignRight() packs the valid entries of the array to its right
   end, and MergeArray() merges the small array into the freed slots. */
void AlignRight(int arr[], int size);
void MergeArray(int big[], int small[], int bigSize, int smallSize);

int main(){

    /* -1 marks a free slot; the remaining values are sorted ascending. */
    int bigArray[10] = {1, -1, 4, -1, -1, 6, -1, 8, -1, 9};
    int smallArray[5] = {2, 5, 7, 10, 11};

printf("\nPress enter to compress array to right : \n");
getch();

AlignRight(bigArray,10);

printf("\nPress enter to see modified array : \n");
getch();

int i;

for(i=0;i<10;i++){
printf("%d  ",bigArray[i]);
}

    /* Merge smallArray into the slots freed at the front of bigArray. */
    MergeArray(bigArray, smallArray, 10, 5);

    printf("\nPress enter to see the merged array:\n");
    getch();

    for (i = 0; i < 10; i++) {
        printf("%d  ", bigArray[i]);
    }

    return 0;
}
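
/* AlignRight() and MergeArray() are not shown in this excerpt. A minimal
   sketch of implementations consistent with main() above -- hypothetical,
   inferred from the call sites, assuming -1 marks a free slot, both arrays
   are sorted ascending, and big[] has exactly smallSize free slots: */
void AlignRight(int arr[], int size) {
    int write = size - 1;
    int read;
    /* Walk from the back, packing every valid value into the rightmost
       slots while preserving their order. */
    for (read = size - 1; read >= 0; read--) {
        if (arr[read] != -1)
            arr[write--] = arr[read];
    }
    /* Mark the slots freed at the front. */
    while (write >= 0)
        arr[write--] = -1;
}

void MergeArray(int big[], int small[], int bigSize, int smallSize) {
    int b = smallSize; /* first valid element of the right-aligned big[] */
    int s = 0;         /* next unread element of small[] */
    int w;             /* write position; always stays at or behind b */
    for (w = 0; w < bigSize; w++) {
        if (s >= smallSize)
            big[w] = big[b++];
        else if (b >= bigSize)
            big[w] = small[s++];
        else
            big[w] = (big[b] <= small[s]) ? big[b++] : small[s++];
    }
}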
// Start of the user data. Right-aligned chunks shift the user data within
// the block, so the same shift must be recomputed here.
uptr HwasanChunkView::Beg() const {
  if (metadata_ && metadata_->right_aligned)
    return AlignRight(block_, metadata_->requested_size);
  return block_;
}
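
// AlignRight() itself is not part of this excerpt. A minimal sketch of the
// shift it has to perform -- hypothetical, assuming granule-aligned blocks of
// kShadowAlignment bytes: move the user data right so that it *ends* on a
// granule boundary, leaving the unused bytes at the front of the block.
static uptr AlignRight(uptr addr, uptr requested_size) {
  uptr tail_size = requested_size % kShadowAlignment;
  if (!tail_size)
    return addr;  // Already a whole number of granules; nothing to shift.
  return addr + kShadowAlignment - tail_size;
}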
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  // HWASan works on kShadowAlignment-sized granules: enforce at least that
  // alignment and round the usable size up to a whole number of granules.
  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  // For left-aligned chunks, fill the unused granule tail with a magic
  // pattern so the deallocation path can detect writes past orig_size.
  if (!right_align_mode)
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size);

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc is false and tag_in_free is true, malloc needs
  // to retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size)
                    ? (t ? t->GenerateRandomTag() : kFallbackAllocTag)
                    : 0;
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag);
  }

  // Right alignment only applies when the requested size does not already end
  // on a granule boundary and the caller's alignment permits the shift.
  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
      right_align_mode) {
    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
    if (right_align_mode == kRightAlignAlways ||
        GetTagFromPointer(as_uptr) & 1) {  // use a tag bit as a random bit.
      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
      meta->right_aligned = 1;
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
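
// TaggedSize() is also not shown here. A plausible sketch, assuming it only
// rounds the requested size up to a whole number of kShadowAlignment-sized
// granules (hypothetical; the extra tail bytes are what tail_magic fills
// above and what right alignment reclaims):
static uptr TaggedSize(uptr size) {
  if (!size)
    size = 1;  // Zero-byte allocations still occupy one granule.
  return RoundUpTo(size, kShadowAlignment);
}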