Esempio n. 1
0
/*
 * ustr overhead demo: grows a string one character at a time (cycling
 * through "123456789 ") and, for each length in [beg, end), runs the
 * tst_ustr checks and prints the per-representation memory overhead.
 * Optional argv[1]/argv[2] override the start/end lengths.
 */
int main(int argc, char *argv[])
{
    struct Ustr *s1 = USTR("");
    static const char txt[] = "123456789 ";
    size_t beg = 0;
    size_t end = 6;
    size_t count = 0;

    /* argv[1] = start length, argv[2] = end length (both decimal). */
    if (argc == 3) {
        beg = atoi(argv[1]);
        end = atoi(argv[2]);
    }

    /* Silently grow up to `beg` before the instrumented loop starts. */
    while (count < beg) {
        ustr_add_rep_chr(&s1, txt[count++ % 10], 1);
    }

    while (count < end) {

        ustr_add_rep_chr(&s1, txt[count++ % 10], 1);

        printf("String: %lu \"%s\"\n", CLU(ustr_len(s1)), ustr_cstr(s1));

        tst_ustr(ustr_cstr(s1), 0, 0, USTR_TRUE);
        tst_ustr(ustr_cstr(s1), 1, 0, USTR_FALSE);
        tst_ustr(ustr_cstr(s1), 2, 1, USTR_TRUE);
        tst_ustr(ustr_cstr(s1), 3, 1, USTR_FALSE);
        tst_ustr(ustr_cstr(s1), 4, 2, USTR_TRUE);
        tst_ustr(ustr_cstr(s1), 5, 2, USTR_FALSE);
        tst_ustr(ustr_cstr(s1), 6, 4, USTR_TRUE);
        tst_ustr(ustr_cstr(s1), 7, 4, USTR_FALSE);

        if (USTR_CONF_HAVE_68bit_SIZE_MAX) {
            tst_ustr(ustr_cstr(s1), 8, 8, USTR_TRUE);
            tst_ustr(ustr_cstr(s1), 9, 8, USTR_FALSE);
        }

        /* BUG FIX: the old expression computed (1 + len) / len in integer
         * arithmetic, truncating to 1 before the multiply by 100.0 — so the
         * printed percentage was always 100.00.  Multiply first, exactly as
         * the three NM_Ustr printfs below already do.  (len >= 1 here, a
         * character was just appended, so no division by zero.) */
        printf("\t strdup()            = (%8lu / %-8lu = %5.2f%% )\n",
               CLU(ustr_len(s1) + 1), CLU(ustr_len(s1)),
               (100. * (1 + ustr_len(s1))) / ustr_len(s1));

        printf("\t NM_Ustr             = (%8lu / %-8lu = %5.2f%% )\n",
               CLU(sizeof(struct NM_Ustr) + ustr_len(s1) + 1),
               CLU(sizeof(struct NM_Ustr) + ustr_len(s1)),
               (100. * (sizeof(struct NM_Ustr) + ustr_len(s1) + 1)) / ustr_len(s1));
        printf("\t NM_Ustr x2          = (%8lu / %-8lu = %5.2f%% )\n",
               CLU(sizeof(struct NM_Ustr) + min_pow(ustr_len(s1) + 1)),
               CLU(sizeof(struct NM_Ustr) + ustr_len(s1)),
               (100. * (sizeof(struct NM_Ustr) + min_pow(ustr_len(s1) + 1))) / ustr_len(s1));
        printf("\t NM_Ustr xUstr       = (%8lu / %-8lu = %5.2f%% )\n",
               CLU(sizeof(struct NM_Ustr) + min_size(ustr_len(s1) + 1)),
               CLU(sizeof(struct NM_Ustr) + ustr_len(s1)),
               (100. * (sizeof(struct NM_Ustr) + min_size(ustr_len(s1) + 1))) / ustr_len(s1));
    }

    ustr_sc_free(&s1);

    return 0;
}
Esempio n. 2
0
/*
 * Finish the current render pass.  With no queued vertices the scratch
 * arrays are released (non-immediate mode only).  Otherwise mismatched
 * per-vertex array counts are clamped with a warning, the vertex data is
 * uploaded, drawn, and the temporary state torn down.
 */
void gs_renderstop(enum gs_draw_mode mode)
{
	graphics_t gfx = thread_graphics;
	size_t tex, count = gfx->verts.num;

	if (!count) {
		if (!gfx->using_immediate) {
			da_free(gfx->verts);
			da_free(gfx->norms);
			da_free(gfx->colors);
			for (tex = 0; tex < 16; tex++)
				da_free(gfx->texverts[tex]);
			vbdata_destroy(gfx->vbd);
		}

		return;
	}

	/* Clamp the draw count to the smallest populated attribute array. */
	if (gfx->norms.num && (gfx->norms.num != gfx->verts.num)) {
		blog(LOG_WARNING, "gs_renderstop: normal count does "
		                  "not match vertex count");
		count = min_size(count, gfx->norms.num);
	}

	if (gfx->colors.num && (gfx->colors.num != gfx->verts.num)) {
		blog(LOG_WARNING, "gs_renderstop: color count does "
		                  "not match vertex count");
		count = min_size(count, gfx->colors.num);
	}

	if (gfx->texverts[0].num &&
	    (gfx->texverts[0].num != gfx->verts.num)) {
		blog(LOG_WARNING, "gs_renderstop: texture vertex count does "
		                  "not match vertex count");
		count = min_size(count, gfx->texverts[0].num);
	}

	if (gfx->using_immediate) {
		/* Immediate mode reuses a persistent vertex buffer. */
		vertexbuffer_flush(gfx->immediate_vertbuffer, false);

		gs_load_vertexbuffer(gfx->immediate_vertbuffer);
		gs_load_indexbuffer(NULL);
		gs_draw(mode, 0, (uint32_t)count);

		reset_immediate_arrays(gfx);
	} else {
		/* Otherwise build a throwaway buffer from the saved data. */
		vertbuffer_t vb = gs_rendersave();

		gs_load_vertexbuffer(vb);
		gs_load_indexbuffer(NULL);
		gs_draw(mode, 0, 0);

		vertexbuffer_destroy(vb);
	}

	gfx->vbd = NULL;
}
Esempio n. 3
0
// Construct a promotion local allocation buffer (PLAB) with the given
// desired size in heap words.  The buffer memory itself is not yet
// allocated: all boundary pointers start out NULL.
PLAB::PLAB(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL), _allocated(0), _wasted(0), _undo_wasted(0)
{
  // ArrayOopDesc::header_size depends on command line initialization.
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? align_object_size(arrayOopDesc::header_size(T_INT)) : 0;
  // Note: the assert deliberately runs AFTER AlignmentReserve is computed
  // above, so it checks the final value.
  assert(min_size() > AlignmentReserve,
         err_msg("Minimum PLAB size " SIZE_FORMAT " must be larger than alignment reserve " SIZE_FORMAT " "
                 "to be able to contain objects", min_size(), AlignmentReserve));
}
/**
 * Adjust the desired (base) size of the current thread's local allocation
 * buffer from the collected allocation statistics.
 */
void ThreadLocalAllocBuffer::resize() {

  if (ResizeTLAB) {	// TLAB resizing is enabled for this VM
    // Compute the next tlab size using expected allocation amount
    size_t alloc = (size_t)(_allocation_fraction.average() *
                            (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
    size_t new_size = alloc / _target_refills;

    // Clamp the new size to the configured minimum/maximum TLAB size
    new_size = MIN2(MAX2(new_size, min_size()), max_size());

    // Round to the object alignment boundary
    size_t aligned_new_size = align_object_size(new_size);

    if (PrintTLAB && Verbose) {
      gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
                          " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
                          myThread(), myThread()->osthread()->thread_id(),
                          _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
    }

    set_desired_size(aligned_new_size);

    set_refill_waste_limit(initial_refill_waste_limit());
  }
}
Esempio n. 5
0
/* TODO: optimize mixing */
/*
 * Pop up to `size` bytes of float samples from `buf` and accumulate them
 * into the output samples at `mix_in`, clamping each result to [-1, 1].
 */
static void mix_float(uint8_t *mix_in, struct circlebuf *buf, size_t size)
{
	float *out = (float*)mix_in;
	float chunk[MIX_BUFFER_SIZE];
	register float sum;

	while (size) {
		size_t bytes = min_size(size, sizeof(chunk));
		size_t samples = bytes / sizeof(float);

		size -= bytes;
		circlebuf_pop_front(buf, chunk, bytes);

		/* Kept as a plain ternary sequence: it lets MSVC emit packed
		 * SSE instructions (addps, minps, maxps, ...); a clamp helper
		 * defeats that optimisation. */
		for (size_t n = 0; n < samples; n++) {
			sum = *out + chunk[n];

			sum = (sum >  1.0f) ?  1.0f : sum;
			sum = (sum < -1.0f) ? -1.0f : sum;

			*(out++) = sum;
		}
	}
}
Esempio n. 6
0
/* TODO: optimize mixing */
/*
 * Pop float samples for one plane of an audio line and accumulate them
 * into every mix whose bit is set in the line's `mixers` mask, starting
 * `time_offset` bytes into each mix buffer.
 */
static void mix_float(struct audio_output *audio, struct audio_line *line,
		size_t size, size_t time_offset, size_t plane)
{
	float *targets[MAX_AUDIO_MIXES];
	float chunk[MIX_BUFFER_SIZE];

	/* Resolve the write position inside every mix buffer up front. */
	for (size_t m = 0; m < MAX_AUDIO_MIXES; m++) {
		uint8_t *base = audio->mixes[m].mix_buffers[plane].array;
		targets[m] = (float*)&base[time_offset];
	}

	while (size) {
		size_t bytes = min_size(size, sizeof(chunk));
		size_t samples;

		size -= bytes;
		circlebuf_pop_front(&line->buffers[plane], chunk, bytes);
		samples = bytes / sizeof(float);

		for (size_t m = 0; m < MAX_AUDIO_MIXES; m++) {
			/* Skip mixes this line does not participate in
			 * (controlled by the line's 'mixes' bitmask). */
			if ((line->mixers & (1 << m)) == 0)
				continue;

			for (size_t n = 0; n < samples; n++)
				*(targets[m]++) += chunk[n];
		}
	}
}
Esempio n. 7
0
void wxRibbonBar::RecalculateMinSize()
{
    wxSize min_size(wxDefaultCoord, wxDefaultCoord);
    size_t numtabs = m_pages.GetCount();
    if(numtabs != 0)
    {
        min_size = m_pages.Item(0).page->GetMinSize();

        size_t i;
        for(i = 1; i < numtabs; ++i)
        {
            wxRibbonPageTabInfo& info = m_pages.Item(i);
            wxSize page_min = info.page->GetMinSize();

            min_size.x = wxMax(min_size.x, page_min.x);
            min_size.y = wxMax(min_size.y, page_min.y);
        }
    }
    if(min_size.y != wxDefaultCoord)
    {
        // TODO: Decide on best course of action when min height is unspecified
        // - should we specify it to the tab minimum, or leave it unspecified?
        min_size.IncBy(0, m_tab_height);
    }

    m_minWidth = min_size.GetWidth();
    m_minHeight = m_arePanelsShown ? min_size.GetHeight() : m_tab_height;
}
Esempio n. 8
0
/*
 * Append `number` literal ("dirty") words from `buffer` to the bitmap,
 * optionally bit-negating each word.  Words are appended in chunks bounded
 * by the largest literal count one run-length word (RLW) can describe; a
 * fresh RLW is pushed between chunks.
 */
void ewah_add_dirty_words(
	struct ewah_bitmap *self, const eword_t *buffer,
	size_t number, int negate)
{
	size_t literals, can_add;

	while (1) {
		/* How many literal words the current RLW can still describe. */
		literals = rlw_get_literal_words(self->rlw);
		can_add = min_size(number, RLW_LARGEST_LITERAL_COUNT - literals);

		rlw_set_literal_words(self->rlw, literals + can_add);

		/* Grow with 3/2 headroom so repeated appends amortize. */
		if (self->buffer_size + can_add >= self->alloc_size)
			buffer_grow(self, (self->buffer_size + can_add) * 3 / 2);

		if (negate) {
			size_t i;
			for (i = 0; i < can_add; ++i)
				self->buffer[self->buffer_size++] = ~buffer[i];
		} else {
			memcpy(self->buffer + self->buffer_size,
				buffer, can_add * sizeof(eword_t));
			self->buffer_size += can_add;
		}

		self->bit_size += can_add * BITS_IN_EWORD;

		if (number - can_add == 0)
			break;

		/* Current RLW is full: start a new one and continue. */
		buffer_push_rlw(self, 0);
		buffer += can_add;
		number -= can_add;
	}
}
Esempio n. 9
0
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  assert(ResizePLAB, "Not set");
  if (_allocated == 0) {
    assert(_unused == 0, "Inconsistency in PLAB stats");
    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*ParallelGCThreads);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = %d) ", plab_sz);
  if (ResizePLAB) {
    _desired_plab_sz = plab_sz;
  }
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
Esempio n. 10
0
// Build the configuration dialog for the Swarm screen saver: two labelled
// sliders (speed, number of bees) in a column on the left and a live
// preview widget on the right.
kSwarmSetup::kSwarmSetup( TQWidget *parent, const char *name )
	: KDialogBase( parent, name, true, i18n( "Setup Swarm Screen Saver" ),
	  Ok|Cancel|Help, Ok, true )
{
	// Load the saved `speed` / `maxLevels` settings before building widgets.
	readSettings();

	setButtonText( Help, i18n( "A&bout" ) );
	TQWidget *main = makeMainWidget();

	TQHBoxLayout *top = new TQHBoxLayout( main, 0, spacingHint() );
	TQVBoxLayout *left = new TQVBoxLayout(top, spacingHint());

	TQLabel *label = new TQLabel( i18n("Speed:"), main );
	min_size(label);
	left->addWidget(label);

	// Speed slider: updates `speed` via slotSpeed().
	TQSlider *slider = new TQSlider(MINSPEED, MAXSPEED, 10, speed,
			Qt::Horizontal, main );
	slider->setMinimumSize( 120, 20 );
    slider->setTickmarks(TQSlider::Below);
    slider->setTickInterval(10);
	connect( slider, TQT_SIGNAL( valueChanged( int ) ),
		 TQT_SLOT( slotSpeed( int ) ) );
	left->addWidget(slider);

	label = new TQLabel( i18n("Number of bees:"), main );
	min_size(label);
	left->addWidget(label);

	// Bee-count slider: updates `maxLevels` via slotLevels().
	slider = new TQSlider(MINBATCH, MAXBATCH, 20, maxLevels,Qt::Horizontal, main );
	slider->setMinimumSize( 120, 20 );
    slider->setTickmarks(TQSlider::Below);
    slider->setTickInterval(20);
	connect( slider, TQT_SIGNAL( valueChanged( int ) ),
		 TQT_SLOT( slotLevels( int ) ) );
	left->addWidget(slider);
	left->addStretch();

	// Live preview: the saver renders directly into this widget's window.
	preview = new TQWidget( main );
	preview->setFixedSize( 220, 170 );
	preview->setBackgroundColor( black );
	preview->show();    // otherwise saver does not get correct size
	saver = new kSwarmSaver( preview->winId() );
	top->addWidget(preview);

	top->addStretch();
}
// One-time process-wide TLAB initialization: derive the per-thread refill
// target from TLABWasteTargetPercent, create the global statistics object,
// and initialize the primordial thread's TLAB(s).
void ThreadLocalAllocBuffer::startup_initialization() {

  // Assuming each thread's active tlab is, on average,
  // 1/2 full at a GC
  _target_refills = 100 / (2 * TLABWasteTargetPercent);
  _target_refills = MAX2(_target_refills, (unsigned)1U);

  _global_stats = new GlobalTLABStats();

  // During jvm startup, the main (primordial) thread is initialized
  // before the heap is initialized.  So reinitialize it now.
  guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
#ifdef COLORED_TLABS
  // With colored heap spaces each thread carries one TLAB per color;
  // both are initialized and tagged here.
  if (UseColoredSpaces) {
    Thread::current()->tlab(HC_RED).initialize();
    Thread::current()->tlab(HC_BLUE).initialize();

    Thread::current()->tlab(HC_RED).set_color(HC_RED);
    Thread::current()->tlab(HC_BLUE).set_color(HC_BLUE);

    if (PrintTLAB && Verbose) {
      gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n",
                          min_size(), Thread::current()->tlab(HC_RED).initial_desired_size(), max_size());
    }
    if (PrintTLAB && Verbose) {
      gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n",
                          min_size(), Thread::current()->tlab(HC_BLUE).initial_desired_size(), max_size());
    }
  } else {
    Thread::current()->tlab().initialize();

    if (PrintTLAB && Verbose) {
      gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n",
                          min_size(), Thread::current()->tlab().initial_desired_size(), max_size());
    }
  }
#else
  Thread::current()->tlab().initialize();

  if (PrintTLAB && Verbose) {
    gclog_or_tty->print("TLAB min: " SIZE_FORMAT " initial: " SIZE_FORMAT " max: " SIZE_FORMAT "\n",
                        min_size(), Thread::current()->tlab().initial_desired_size(), max_size());
  }
#endif
}
Esempio n. 12
0
// Compute desired plab size for one gc worker thread and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz() {
  log_plab_allocation();

  if (!ResizePLAB) {
    // Clear accumulators for next round.
    reset();
    return;
  }

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           "Inconsistency in PLAB stats: "
           "_allocated: " SIZE_FORMAT ", "
           "_wasted: " SIZE_FORMAT ", "
           "_unused: " SIZE_FORMAT ", "
           "_undo_wasted: " SIZE_FORMAT,
           _allocated, _wasted, _unused, _undo_wasted);

    _allocated = 1;
  }
  double wasted_frac    = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used = _allocated - _wasted - _unused;
  // Assumed to have 1 gc worker thread
  size_t recent_plab_sz = used / target_refills;
  // Take historical weighted average
  _filter.sample(recent_plab_sz);
  // Clip from above and below, and align to object boundary
  size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
  new_plab_sz = MIN2(max_size(), new_plab_sz);
  new_plab_sz = align_object_size(new_plab_sz);
  // Latch the result
  _desired_net_plab_sz = new_plab_sz;

  log_sizing(recent_plab_sz, new_plab_sz);

  reset();
}
// Choose the starting TLAB size for a thread: an explicit -XX:TLABSize wins,
// a pre-heap startup thread falls back to the minimum, and otherwise the
// size is derived from heap capacity and the average allocating-thread count.
size_t ThreadLocalAllocBuffer::initial_desired_size() {
  if (TLABSize > 0) {
    return MIN2(TLABSize / HeapWordSize, max_size());
  }

  if (global_stats() == NULL) {
    // Startup issue - main thread initialized before heap initialized.
    return min_size();
  }

  // Initial size is a function of the average number of allocating threads.
  unsigned allocating_threads = global_stats()->allocating_threads_avg();

  size_t sz = (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize) /
                    (allocating_threads * target_refills());
  sz = align_object_size(sz);
  return MIN2(MAX2(sz, min_size()), max_size());
}
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  // Guard against division by zero below when nothing was allocated.
  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT", "
                   "_used  : "SIZE_FORMAT,
                   _allocated, _wasted, _unused, _used));

    _allocated = 1;
  }
  double wasted_frac    = (double)_unused/(double)_allocated;
  size_t target_refills = (size_t)((wasted_frac*TargetSurvivorRatio)/
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used/(target_refills*no_of_gc_workers);
  // BUG FIX: plab_sz is a size_t; "%d" mismatches on LP64 (undefined
  // behavior in printf-style formatting).  Use SIZE_FORMAT, as the
  // asserts above already do.
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT " ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = " SIZE_FORMAT ") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
    // Minimum usable size of the option bar: the summed button widths
    // (plus padding) by the fixed bar height.
    virtual GG::Pt MinUsableSize() const {
        GG::Pt result(GG::X0, GG::Y0);

        std::vector<ToggleData*>::const_iterator it = m_toggles.begin();
        for (; it != m_toggles.end(); ++it)
            result.x += (*it)->button->Width() + OPTION_BUTTON_PADDING;

        result.y = OPTION_BAR_HEIGHT;

        return result;
    }
Esempio n. 16
0
// Construct a parallel-GC allocation buffer with the given desired size in
// heap words; the buffer itself is not yet allocated (boundary pointers NULL).
ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
  // BUG FIX: the assert used to run BEFORE AlignmentReserve was computed,
  // so it checked a stale (zero-initialized) value.  Check it after the
  // assignments above, matching the corrected PLAB constructor.
  assert (min_size() > AlignmentReserve, "Inconsistency!");
}
Esempio n. 17
0
/*
 * qsort/bsearch comparator for AoM_Block: orders by the common data prefix
 * first, then by block size (shorter sorts first when the prefix matches).
 */
static int aomc_cmp(const void *v1, const void *v2)
{
    const AoM_Block *lhs = (const AoM_Block *)v1;
    const AoM_Block *rhs = (const AoM_Block *)v2;

    int cmp = memcmp(lhs->blk_data, rhs->blk_data,
                     min_size(lhs->blk_size, rhs->blk_size));
    if (cmp != 0)
        return cmp;
    if (lhs->blk_size == rhs->blk_size)
        return 0;
    return (lhs->blk_size < rhs->blk_size) ? -1 : +1;
}
Esempio n. 18
0
// Recompute the SDL viewport and render scale from the current window or
// fullscreen configuration: pick a target size, derive the pixel aspect
// ratio, clamp the logical size between 640x480 and 1280x800, then program
// the renderer accordingly.
void
SDLRenderer::apply_viewport()
{
  // Prefer the configured fullscreen size when in fullscreen mode.
  Size target_size = (g_config->use_fullscreen && g_config->fullscreen_size != Size(0, 0)) ?
    g_config->fullscreen_size :
    g_config->window_size;

  float pixel_aspect_ratio = 1.0f;
  if (g_config->aspect_size != Size(0, 0))
  {
    pixel_aspect_ratio = calculate_pixel_aspect_ratio(m_desktop_size,
                                                      g_config->aspect_size);
  }
  else if (g_config->use_fullscreen)
  {
    pixel_aspect_ratio = calculate_pixel_aspect_ratio(m_desktop_size,
                                                      target_size);
  }

  // calculate the viewport
  Size max_size(1280, 800);
  Size min_size(640, 480);

  Size logical_size;
  calculate_viewport(min_size, max_size,
                     target_size,
                     pixel_aspect_ratio,
                     g_config->magnification,
                     m_scale, logical_size, m_viewport);

  SCREEN_WIDTH = logical_size.width;
  SCREEN_HEIGHT = logical_size.height;

  if (m_viewport.x != 0 || m_viewport.y != 0)
  {
    // Clear the screen to avoid garbage in unreachable areas after we
    // reset the coordinate system
    SDL_SetRenderDrawColor(m_renderer, 0, 0, 0, 255);
    SDL_SetRenderDrawBlendMode(m_renderer, SDL_BLENDMODE_NONE);
    SDL_RenderClear(m_renderer);
    SDL_RenderPresent(m_renderer);
    SDL_RenderClear(m_renderer);
  }

  // SetViewport() works in scaled screen coordinates, so we have to
  // reset it to 1.0, 1.0 to get meaningful results
  SDL_RenderSetScale(m_renderer, 1.0f, 1.0f);
  SDL_RenderSetViewport(m_renderer, &m_viewport);
  SDL_RenderSetScale(m_renderer, m_scale.x, m_scale.y);
}
Esempio n. 19
0
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  // Guard against division by zero below when nothing was allocated.
  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT,
                   _allocated, _wasted, _unused));

    _allocated = 1;
  }
  // Fraction of allocated space that went unused, scaled into a refill target.
  double wasted_frac    = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) / TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  size_t used = _allocated - _wasted - _unused;
  // Per-worker share of the net-used space per refill this round.
  size_t recent_plab_sz = used / (target_refills * no_of_gc_workers);
  // Take historical weighted average
  _filter.sample(recent_plab_sz);
  // Clip from above and below, and align to object boundary
  size_t new_plab_sz = MAX2(min_size(), (size_t)_filter.average());
  new_plab_sz = MIN2(max_size(), new_plab_sz);
  new_plab_sz = align_object_size(new_plab_sz);
  // Latch the result
  if (PrintPLAB) {
    gclog_or_tty->print(" (plab_sz = " SIZE_FORMAT" desired_plab_sz = " SIZE_FORMAT") ", recent_plab_sz, new_plab_sz);
  }
  _desired_plab_sz = new_plab_sz;

  reset();
}
Esempio n. 20
0
// Read whitespace-delimited words from standard input, then report the
// shortest word, the longest word, and the most frequent word.
int main()
{
  std::vector<std::string> words;
  std::string token = "";

  while (std::cin >> token)
    words.push_back(token);

  min_size(words);
  max_size(words);
  mode(words);
  return 0;
}
// Minimum usable size of the window: the bar sizer's requirement widened to
// fit the options bar, plus the options bar's height.
GG::Pt GraphicalSummaryWnd::MinUsableSize() const {
    GG::Pt result(GG::X0, GG::Y0);

    // The bar sizer holds the authoritative sizing information for the side
    // bars even though it is not a GG::Wnd and has no virtual MinUsableSize
    // of its own, so ask it directly.
    if (m_sizer)
        result += m_sizer->GetMinSize();

    if (m_options_bar) {
        GG::Pt bar_size(m_options_bar->MinUsableSize());
        result.x = std::max(result.x, bar_size.x);
        result.y += bar_size.y;
    }

    return result;
}
void ThreadLocalAllocBuffer::resize() {
  // Compute the next tlab size using expected allocation amount
  assert(ResizeTLAB, "Should not call this otherwise");
  size_t alloc = (size_t)(_allocation_fraction.average() *
                          (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
  size_t new_size = alloc / _target_refills;

  new_size = MIN2(MAX2(new_size, min_size()), max_size());

  size_t aligned_new_size = align_object_size(new_size);

  if (PrintTLAB && Verbose) {
    gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
                        " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
                        p2i(myThread()), myThread()->osthread()->thread_id(),
                        _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
  }
  set_desired_size(aligned_new_size);
  set_refill_waste_limit(initial_refill_waste_limit());
}
Esempio n. 23
0
/*
 * Copy data between two fuse buffer vectors, advancing both as regions are
 * consumed.  Returns the number of bytes copied; if the very first region
 * copy fails its negative error code is returned instead, while a later
 * failure just ends the copy early with the byte count so far.
 */
ssize_t fuse_buf_copy(struct fuse_bufvec *dstv, struct fuse_bufvec *srcv,
		      enum fuse_buf_copy_flags flags)
{
	size_t copied = 0;

	/* Copying a vector onto itself: report its full size, copy nothing. */
	if (dstv == srcv)
		return fuse_buf_size(dstv);

	for (;;) {
		const struct fuse_buf *src = fuse_bufvec_current(srcv);
		const struct fuse_buf *dst = fuse_bufvec_current(dstv);
		size_t src_len;
		size_t dst_len;
		size_t len;
		ssize_t res;

		/* Either vector exhausted: done. */
		if (src == NULL || dst == NULL)
			break;

		/* Remaining bytes in the current region of each side. */
		src_len = src->size - srcv->off;
		dst_len = dst->size - dstv->off;
		len = min_size(src_len, dst_len);

		res = fuse_buf_copy_one(dst, dstv->off, src, srcv->off, len, flags);
		if (res < 0) {
			if (!copied)
				return res;
			break;
		}
		copied += res;

		if (!fuse_bufvec_advance(srcv, res) ||
		    !fuse_bufvec_advance(dstv, res))
			break;

		/* Short copy (fewer bytes than requested): stop early. */
		if (res < len)
			break;
	}

	return copied;
}
Esempio n. 24
0
/*
 * Block until the application receive buffer holds at least one packet,
 * then dequeue the oldest packet, copy as much of it as fits into
 * `app_buff`, and return the dequeued packet's size.
 *
 * Fixes over the previous version:
 *  - control could fall off the end of this non-void function when the
 *    post-wait recheck failed (undefined behavior);
 *  - shared queue state (count / head pointer) was read before the mutex
 *    was taken, racing with concurrent producers/consumers.  The
 *    availability check now happens under the lock.
 *
 * NOTE(review): only min(tmp.size, app_buff.size) bytes are copied, yet the
 * full packet size is returned to preserve the original interface — callers
 * presumably treat the return value as the packet length; verify.
 */
int app_buffer_get(mic_tcp_payload app_buff)
{
    mic_tcp_payload tmp;
    app_buffer* current;

    /* Wait for data; re-check the count under the lock so another consumer
     * cannot drain the queue between the test and the dequeue. */
    pthread_mutex_lock(&lock);
    while (app_buffer_count == 0)
    {
        pthread_mutex_unlock(&lock);
        usleep(1000);
        pthread_mutex_lock(&lock);
    }

    tmp.size = app_buffer_first->packet.size;
    tmp.data = app_buffer_first->packet.data;

    /* Unlink the head node. */
    current = app_buffer_first;
    if (app_buffer_count == 1)
    {
        app_buffer_first = app_buffer_last = NULL;
    }
    else
    {
        app_buffer_first = app_buffer_first->next;
    }

    /* Copy only what fits in the caller's buffer. */
    int tsize = min_size(tmp.size, app_buff.size);
    memcpy(app_buff.data, tmp.data, tsize);

    app_buffer_size -= tmp.size;
    app_buffer_count--;
    free(current);
    free(tmp.data);

    pthread_mutex_unlock(&lock);

    return tmp.size;
}
Esempio n. 25
0
/**
 * Main entry point for character-by-character file comparison.
 * Iterates through both files one character at a time; the moment a pair of
 * characters differs the function reports the position and returns 0.  If
 * the files match but one ends before the other, the shorter file's name is
 * reported and 0 is returned as well.
 *
 * @param  fp1 first file pointer
 * @param  fp2 second file pointer
 * @param  f1_name name of the first file as a string
 * @param  f2_name name of the second file as a string
 * @param  ignore_caps nonzero to compare case-insensitively
 * @return 1 if the files compare equal per character, 0 otherwise
 */
int comp_by_char(FILE * fp1, FILE *fp2, char f1_name[], char f2_name[], int ignore_caps){
	long file_len1, file_len2, file_len_min, file_len_max;
	int c1, c2; /*the chars retrieved at each location*/
	size_t count = 1;
	char *eof_on_file;	
	file_len1 = get_file_size(fp1);
	file_len2 = get_file_size(fp2);
	
	/* Two empty files compare equal without reading anything. */
	if (empty_files(file_len1,file_len2)){
		return 1;
	}

	/* Exactly one empty file: they differ at the very first character. */
	if(one_empty_file(file_len1,file_len2)){
		printf("files differ: char %d\n",(int)count);
		return 0;
	}
	

	file_len_min = min_size(file_len1, file_len2);
	file_len_max = max_size(file_len1, file_len2); 

	init_read(fp1); /*goes to beginning of file*/
	init_read(fp2);/*goes to beginning of file*/
	/* Note: if c1 hits EOF first the short-circuit skips reading c2; the
	 * length comparison below handles the one-file-shorter case. */
	while((c1 = (ignore_caps? tolower(fgetc(fp1)):(fgetc(fp1)))) != EOF
	 		&&(c2 = (ignore_caps? tolower(fgetc(fp2)):(fgetc(fp2)))) != EOF){ /*while neither c1 nor c2 are EOF*/
				if(c1 != c2){ /* assign c1,c2 and compare*/
					printf("files differ: char %ld\n",(long)count);	
					return 0; /*return at not equal*/
				}
				count++; 	
	}
	
	/* Same prefix but different lengths: report which file ended first. */
	if(file_len_min != file_len_max){
			eof_on_file = (file_len1 == file_len_min)? f1_name: f2_name;
			printf("EOF on %s\n", eof_on_file);	
			return 0; /*return at not equal*/
	} /* get next char, should be EOF*/	
	
	return 1;
}
Esempio n. 26
0
/*
 * Mix one audio line into the output planes, starting at the byte offset
 * implied by the line's base timestamp.  Returns false when the line starts
 * past the end of the mix window.
 */
static inline bool mix_audio_line(struct audio_output *audio,
		struct audio_line *line, size_t size, uint64_t timestamp)
{
	size_t offset = (size_t)ts_diff_bytes(audio,
			line->base_timestamp, timestamp);
	if (offset > size)
		return false;

	size -= offset;

#ifdef DEBUG_AUDIO
	blog(LOG_DEBUG, "shaved off %lu bytes", size);
#endif

	for (size_t plane = 0; plane < audio->planes; plane++) {
		size_t bytes = min_size(size, line->buffers[plane].size);

		mix_float(audio, line, bytes, offset, plane);
	}

	return true;
}
Esempio n. 27
0
/*
 * Append `number` empty (all-zero or all-one, per `v`) words to the bitmap
 * by extending run-length words (RLWs).  Returns how many new RLWs were
 * pushed onto the buffer.
 */
static size_t add_empty_words(struct ewah_bitmap *self, int v, size_t number)
{
	size_t added = 0;
	eword_t runlen, can_add;

	/* Reuse the current RLW if it is empty or already runs with bit v;
	 * otherwise start a fresh one carrying the requested run bit. */
	if (rlw_get_run_bit(self->rlw) != v && rlw_size(self->rlw) == 0) {
		rlw_set_run_bit(self->rlw, v);
	} else if (rlw_get_literal_words(self->rlw) != 0 ||
			rlw_get_run_bit(self->rlw) != v) {
		buffer_push_rlw(self, 0);
		if (v) rlw_set_run_bit(self->rlw, v);
		added++;
	}

	/* Extend the current run as far as one RLW allows. */
	runlen = rlw_get_running_len(self->rlw);
	can_add = min_size(number, RLW_LARGEST_RUNNING_COUNT - runlen);

	rlw_set_running_len(self->rlw, runlen + can_add);
	number -= can_add;

	/* Emit full-capacity RLWs while whole runs remain. */
	while (number >= RLW_LARGEST_RUNNING_COUNT) {
		buffer_push_rlw(self, 0);
		added++;
		if (v) rlw_set_run_bit(self->rlw, v);
		rlw_set_running_len(self->rlw, RLW_LARGEST_RUNNING_COUNT);
		number -= RLW_LARGEST_RUNNING_COUNT;
	}

	/* Tail run shorter than one RLW's capacity. */
	if (number > 0) {
		buffer_push_rlw(self, 0);
		added++;

		if (v) rlw_set_run_bit(self->rlw, v);
		rlw_set_running_len(self->rlw, number);
	}

	return added;
}
Esempio n. 28
0
/*
 * GTK "changed" handler for the domain entry: when the prefix entry is a
 * case-insensitive prefix of the domain text, mirror the domain's first
 * label (upper-cased, cut at the first '.') into the prefix entry.
 */
static void
domain_changed_event(GtkEntry* entry, gpointer _data)
{
    JoinDialog* dialog = (JoinDialog*) _data;
    const gchar* domain = gtk_entry_get_text(entry);
    const gchar* prefix = gtk_entry_get_text(dialog->prefix_entry);

    if (strncasecmp(domain, prefix, min_size(strlen(domain), strlen(prefix))) != 0)
        return;

    gchar* upper = g_utf8_strup(domain, -1);
    gchar* dot = strchr(upper, '.');
    if (dot)
    {
        *dot = '\0';
    }
    gtk_entry_set_text(dialog->prefix_entry, upper);
    g_free(upper);
}
Esempio n. 29
0
// Calculates the PLAB size for the current number of GC worker threads:
// the desired net PLAB size is divided evenly among the workers, the
// per-worker share is aligned to object-size granularity, and the result
// never drops below min_size().
// NOTE(review): assumes no_of_gc_workers > 0 — confirm callers guarantee it.
size_t PLABStats::desired_plab_sz(uint no_of_gc_workers) {
  const size_t per_worker = _desired_net_plab_sz / no_of_gc_workers;
  const size_t aligned_sz = (size_t)align_object_size(per_worker);
  return MAX2(min_size(), aligned_sz);
}
Esempio n. 30
0
/*
 * Receive one typed IMC message on `channel`, scattering the payload
 * into the user-supplied iovec array in `nitmhp` and internalizing any
 * transferred NaClHandles into NaClDesc objects placed in
 * nitmhp->ndescv.
 *
 * Returns the number of user payload bytes delivered (>= 0) on
 * success, or a negated NACL_ABI_* errno value on failure.  On
 * success, nitmhp->ndesc_length is rewritten to the number of
 * descriptors actually delivered, and nitmhp->flags may gain
 * NACL_ABI_RECVMSG_DESC_TRUNCATED.  `flags` may contain
 * NACL_ABI_IMC_NONBLOCK; any other bits are logged and stripped.
 *
 * Ownership: descriptors stored into nitmhp->ndescv are owned by the
 * caller; surplus descriptors and all raw NaClHandles are released in
 * the cleanup section below.
 */
ssize_t NaClImcRecvTypedMessage(
    struct NaClDesc               *channel,
    struct NaClImcTypedMsgHdr     *nitmhp,
    int                           flags,
    struct NaClDescQuotaInterface *quota_interface) {
  int                       supported_flags;
  ssize_t                   retval;
  char                      *recv_buf;
  size_t                    user_bytes;
  NaClHandle                kern_handle[NACL_ABI_IMC_DESC_MAX];
  struct NaClIOVec          recv_iov;
  struct NaClMessageHeader  recv_hdr;
  ssize_t                   total_recv_bytes;
  struct NaClInternalHeader intern_hdr;
  size_t                    recv_user_bytes_avail;
  size_t                    tmp;
  char                      *user_data;
  size_t                    iov_copy_size;
  struct NaClDescXferState  xfer;
  struct NaClDesc           *new_desc[NACL_ABI_IMC_DESC_MAX];
  int                       xfer_status;
  size_t                    i;
  size_t                    num_user_desc;

  NaClLog(4,
          "Entered NaClImcRecvTypedMsg(0x%08"NACL_PRIxPTR", "
          "0x%08"NACL_PRIxPTR", %d)\n",
          (uintptr_t) channel, (uintptr_t) nitmhp, flags);

  /* Strip (but log) any flag bits this implementation does not support. */
  supported_flags = NACL_ABI_IMC_NONBLOCK;
  if (0 != (flags & ~supported_flags)) {
    NaClLog(LOG_WARNING,
            "WARNING: NaClImcRecvTypedMsg: unknown IMC flag used: 0x%x\n",
            flags);
    flags &= supported_flags;
  }

  /* Validate user-controlled vector lengths before doing any work. */
  if (nitmhp->iov_length > NACL_ABI_IMC_IOVEC_MAX) {
    NaClLog(4, "gather/scatter array too large\n");
    return -NACL_ABI_EINVAL;
  }
  if (nitmhp->ndesc_length > NACL_ABI_IMC_USER_DESC_MAX) {
    NaClLog(4, "handle vector too long\n");
    return -NACL_ABI_EINVAL;
  }

  /* Sum iov lengths with an explicit overflow check on each addition. */
  user_bytes = 0;
  for (i = 0; i < nitmhp->iov_length; ++i) {
    if (user_bytes > SIZE_T_MAX - nitmhp->iov[i].length) {
      NaClLog(4, "integer overflow in iov length summation\n");
      return -NACL_ABI_EINVAL;
    }
    user_bytes += nitmhp->iov[i].length;
  }
  /*
   * if user_bytes > NACL_ABI_IMC_USER_BYTES_MAX,
   * we will just never fill up all the buffer space.
   */
  user_bytes = min_size(user_bytes, NACL_ABI_IMC_USER_BYTES_MAX);
  /*
   * user_bytes = \min(\sum_{i=0}{nitmhp->iov_length-1} nitmhp->iov[i].length,
   *                   NACL_ABI_IMC_USER_BYTES_MAX)
   */

  recv_buf = NULL;
  memset(new_desc, 0, sizeof new_desc);
  /*
   * from here on, set retval and jump to cleanup code.
   */

  recv_buf = malloc(NACL_ABI_IMC_BYTES_MAX);
  if (NULL == recv_buf) {
    NaClLog(4, "no memory for receive buffer\n");
    retval = -NACL_ABI_ENOMEM;
    goto cleanup;
  }

  recv_iov.base = (void *) recv_buf;
  recv_iov.length = NACL_ABI_IMC_BYTES_MAX;

  recv_hdr.iov = &recv_iov;
  recv_hdr.iov_length = 1;

  /* Sentinel-fill so cleanup can tell which handles were populated. */
  for (i = 0; i < NACL_ARRAY_SIZE(kern_handle); ++i) {
    kern_handle[i] = NACL_INVALID_HANDLE;
  }

  if (NACL_DESC_IMC_SOCKET == ((struct NaClDescVtbl const *)
                               channel->base.vtbl)->typeTag) {
    /*
     * Channel can transfer access rights.
     */

    recv_hdr.handles = kern_handle;
    recv_hdr.handle_count = NACL_ARRAY_SIZE(kern_handle);
    NaClLog(4, "Connected socket, may transfer descriptors\n");
  } else {
    /*
     * Channel cannot transfer access rights.  The syscall would fail
     * if recv_iov.length is non-zero.
     */

    recv_hdr.handles = (NaClHandle *) NULL;
    recv_hdr.handle_count = 0;
    NaClLog(4, "Transferable Data Only socket\n");
  }

  recv_hdr.flags = 0;  /* just to make it obvious; IMC will clear it for us */

  total_recv_bytes = (*((struct NaClDescVtbl const *) channel->base.vtbl)->
                      LowLevelRecvMsg)(channel,
                                       &recv_hdr,
                                       flags);
  if (NaClSSizeIsNegErrno(&total_recv_bytes)) {
    NaClLog(1, "LowLevelRecvMsg failed, returned %"NACL_PRIdS"\n",
            total_recv_bytes);
    retval = total_recv_bytes;
    goto cleanup;
  }
  /* total_recv_bytes >= 0 */

  /*
   * NB: recv_hdr.flags may already contain NACL_ABI_MESSAGE_TRUNCATED
   * and/or NACL_ABI_HANDLES_TRUNCATED.
   *
   * First, parse the NaClInternalHeader and any subsequent fields to
   * extract and internalize the NaClDesc objects from the array of
   * NaClHandle values.
   *
   * Copy out to user buffer.  Possibly additional truncation may occur.
   *
   * Since total_recv_bytes >= 0, the cast to size_t is value preserving.
   */
  if ((size_t) total_recv_bytes < sizeof intern_hdr) {
    NaClLog(4, ("only received %"NACL_PRIdS" (0x%"NACL_PRIxS") bytes,"
                " but internal header is %"NACL_PRIdS" (0x%"NACL_PRIxS
                ") bytes\n"),
            total_recv_bytes, total_recv_bytes,
            sizeof intern_hdr, sizeof intern_hdr);
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  memcpy(&intern_hdr, recv_buf, sizeof intern_hdr);
  /*
   * Future code should handle old versions in a backward compatible way.
   */
  if (NACL_HANDLE_TRANSFER_PROTOCOL != intern_hdr.h.xfer_protocol_version) {
    NaClLog(4, ("protocol version mismatch:"
                " got %x, but can only handle %x\n"),
            intern_hdr.h.xfer_protocol_version, NACL_HANDLE_TRANSFER_PROTOCOL);
    /*
     * The returned value should be a special version mismatch error
     * code that, along with the recv_buf, permit retrying with later
     * decoders.
     */
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  if ((size_t) total_recv_bytes < (intern_hdr.h.descriptor_data_bytes
                                   + sizeof intern_hdr)) {
    NaClLog(4, ("internal header (size %"NACL_PRIdS" (0x%"NACL_PRIxS")) "
                "says there are "
                "%d (0x%x) NRD xfer descriptor bytes, "
                "but we received %"NACL_PRIdS" (0x%"NACL_PRIxS") bytes\n"),
            sizeof intern_hdr, sizeof intern_hdr,
            intern_hdr.h.descriptor_data_bytes,
            intern_hdr.h.descriptor_data_bytes,
            total_recv_bytes, total_recv_bytes);
    retval = -NACL_ABI_EIO;
    goto cleanup;
  }
  /* Payload is what remains after the header and the descriptor data. */
  recv_user_bytes_avail = (total_recv_bytes
                           - intern_hdr.h.descriptor_data_bytes
                           - sizeof intern_hdr);
  /*
   * NaCl app asked for user_bytes, and we have recv_user_bytes_avail.
   * Set recv_user_bytes_avail to the min of these two values, as well
   * as inform the caller if data truncation occurred.
   */
  if (user_bytes < recv_user_bytes_avail) {
    /*
     * NOTE(review): the truncation bit is set on recv_hdr.flags here,
     * but recv_hdr.flags is never copied to nitmhp->flags in the code
     * visible in this function (unlike DESC_TRUNCATED below) — verify
     * against the upstream implementation that callers can observe it.
     */
    recv_hdr.flags |= NACL_ABI_RECVMSG_DATA_TRUNCATED;
  }
  recv_user_bytes_avail = min_size(recv_user_bytes_avail, user_bytes);

  retval = recv_user_bytes_avail;  /* default from hence forth */

  /*
   * Let UserDataSize := recv_user_bytes_avail.  (bind to current value)
   */

  user_data = recv_buf + sizeof intern_hdr + intern_hdr.h.descriptor_data_bytes;
  /*
   * Let StartUserData := user_data
   */

  /*
   * Precondition: user_data in [StartUserData, StartUserData + UserDataSize].
   *
   * Invariant:
   *  user_data + recv_user_bytes_avail == StartUserData + UserDataSize
   */
  for (i = 0; i < nitmhp->iov_length && 0 < recv_user_bytes_avail; ++i) {
    iov_copy_size = min_size(nitmhp->iov[i].length, recv_user_bytes_avail);

    memcpy(nitmhp->iov[i].base, user_data, iov_copy_size);

    user_data += iov_copy_size;
    /*
     * subtraction could not underflow due to how recv_user_bytes_avail was
     * computed; however, we are paranoid, in case the code changes.
     */
    tmp = recv_user_bytes_avail - iov_copy_size;
    if (tmp > recv_user_bytes_avail) {
      NaClLog(LOG_FATAL,
              "NaClImcRecvTypedMessage: impossible underflow occurred");
    }
    recv_user_bytes_avail = tmp;

  }
  /*
   * postcondition:  recv_user_bytes_avail == 0.
   *
   * NB: 0 < recv_user_bytes_avail \rightarrow i < nitmhp->iov_length
   * must hold, due to how user_bytes is computed.  We leave the
   * unnecessary test in the loop condition to avoid future code
   * changes from causing problems as defensive programming.
   */

  /*
   * Now extract/internalize the NaClHandles as NaClDesc objects.
   * Note that we will extract beyond nitmhp->desc_length, since we
   * must still destroy the ones that are dropped.
   */
  xfer.next_byte = recv_buf + sizeof intern_hdr;
  xfer.byte_buffer_end = xfer.next_byte + intern_hdr.h.descriptor_data_bytes;
  xfer.next_handle = kern_handle;
  xfer.handle_buffer_end = kern_handle + recv_hdr.handle_count;

  i = 0;
  while (xfer.next_byte < xfer.byte_buffer_end) {
    struct NaClDesc *out;

    xfer_status = NaClDescInternalizeFromXferBuffer(&out, &xfer,
                                                    quota_interface);
    NaClLog(4, "NaClDescInternalizeFromXferBuffer: returned %d\n", xfer_status);
    if (0 == xfer_status) {
      /* end of descriptors reached */
      break;
    }
    if (i >= NACL_ARRAY_SIZE(new_desc)) {
      NaClLog(LOG_FATAL,
              ("NaClImcRecvTypedMsg: trusted peer tried to send too many"
               " descriptors!\n"));
    }
    if (1 != xfer_status) {
      /* xfer_status < 0, out did not receive output */
      retval = -NACL_ABI_EIO;
      goto cleanup;
    }
    /* success: take ownership of out (released in cleanup if not passed on) */
    new_desc[i] = out;
    out = NULL;
    ++i;
  }
  num_user_desc = i;  /* actual number of descriptors received */
  if (nitmhp->ndesc_length < num_user_desc) {
    nitmhp->flags |= NACL_ABI_RECVMSG_DESC_TRUNCATED;
    num_user_desc = nitmhp->ndesc_length;
  }

  /* transfer ownership to nitmhp->ndescv; some may be left behind */
  for (i = 0; i < num_user_desc; ++i) {
    nitmhp->ndescv[i] = new_desc[i];
    new_desc[i] = NULL;
  }

  /* cast is safe because we clamped num_user_desc earlier to
   * be no greater than the original value of nithmp->ndesc_length.
   */
  nitmhp->ndesc_length = (nacl_abi_size_t)num_user_desc;

  /* retval is number of bytes received */

cleanup:
  free(recv_buf);

  /*
   * Note that we must exercise discipline when constructing NaClDesc
   * objects from NaClHandles -- the NaClHandle values *must* be set
   * to NACL_INVALID_HANDLE after the construction of the NaClDesc
   * where ownership of the NaClHandle is transferred into the NaCDesc
   * object. Otherwise, between new_desc and kern_handle cleanup code,
   * a NaClHandle might be closed twice.
   */
  for (i = 0; i < NACL_ARRAY_SIZE(new_desc); ++i) {
    if (NULL != new_desc[i]) {
      NaClDescUnref(new_desc[i]);
      new_desc[i] = NULL;
    }
  }
  for (i = 0; i < NACL_ARRAY_SIZE(kern_handle); ++i) {
    if (NACL_INVALID_HANDLE != kern_handle[i]) {
      (void) NaClClose(kern_handle[i]);
    }
  }

  NaClLog(3, "NaClImcRecvTypedMsg: returning %"NACL_PRIdS"\n", retval);
  return retval;
}