Example #1
static int prep_more_ios(struct submitter *s, int max_ios)
{
	struct io_sq_ring *ring = &s->sq_ring;
	unsigned index, tail, next_tail, prepped = 0;

	next_tail = tail = *ring->tail;
	do {
		next_tail++;
		read_barrier();
		if (next_tail == *ring->head)
			break;

		index = tail & sq_ring_mask;
		init_io(s, index);
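		/* stage the sqe index in the SQ array; the new tail is only published below, after a write barrier */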
		ring->array[index] = index;
		prepped++;
		tail = next_tail;
	} while (prepped < max_ios);

	if (*ring->tail != tail) {
		/* order tail store with writes to sqes above */
		write_barrier();
		*ring->tail = tail;
		write_barrier();
	}
	return prepped;
}
Example #2
static void fio_gtod_update(void)
{
	if (fio_tv) {
		struct timeval __tv;

		gettimeofday(&__tv, NULL);
		fio_tv->tv_sec = __tv.tv_sec;
		write_barrier();
		fio_tv->tv_usec = __tv.tv_usec;
		write_barrier();
	}
}
Example #3
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
        ASSERT(p->header.info == &stg_WHITEHOLE_info);
        // val must be in to-space.  Not always: when we recursively
        // invoke eval_thunk_selector(), the recursive calls will not
        // evacuate the value (because we want to select on the value,
        // not evacuate it), so in this case val is in from-space.
        // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));

        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // value.  The value is still in from-space at this stage.
        //
        // (old note: Why not do upd_evacuee(q,p)?  Because we have an
        // invariant that an EVACUATED closure always points to an
        // object in the same or an older generation (required by
        // the short-cut test in the EVACUATED case, below).
        if ((StgClosure *)p == val) {
            // must be a loop; just leave a BLACKHOLE in place.  This
            // can happen when we have a chain of selectors that
            // eventually loops back on itself.  We can't leave an
            // indirection pointing to itself, and we want the program
            // to deadlock if it ever enters this closure, so
            // BLACKHOLE is correct.

            // XXX we do not have BLACKHOLEs any more; replace with
            // a THUNK_SELECTOR again.  This will go into a loop if it is
            // entered, and should result in a NonTermination exception.
            ((StgThunk *)p)->payload[0] = val;
            write_barrier();
            SET_INFO((StgClosure *)p, &stg_sel_0_upd_info);
        } else {
            ((StgInd *)p)->indirectee = val;
            write_barrier();
            SET_INFO((StgClosure *)p, &stg_IND_info);
        }

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
Example #4
/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
object *factor_vm::allot_large_object(cell type, cell size)
{
	/* If tenured space does not have enough room, collect and compact */
	if(!data->tenured->can_allot_p(size))
	{
		primitive_compact_gc();

		/* If it still won't fit, grow the heap */
		if(!data->tenured->can_allot_p(size))
		{
			gc(collect_growing_heap_op,
				size, /* requested size */
				true /* trace contexts? */);
		}
	}

	object *obj = data->tenured->allot(size);

	/* Allows initialization code to store old->new pointers
	without hitting the write barrier in the common case of
	a nursery allocation */
	write_barrier(obj,size);

	obj->initialize(type);
	return obj;
}
Example #5
  Object* Tuple::put(STATE, native_int idx, Object* val) {
    assert(idx >= 0 && idx < num_fields());

    this->field[idx] = val;
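    // Only heap references need the generational write barrier; immediates are skipped.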
    if(val->reference_p()) write_barrier(state, val);
    return val;
  }
Example #6
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, nat gen_no, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen_no);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // if somebody else reads the forwarding pointer, we better make
    // sure there's a closure at the end of it.
    write_barrier();
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
Example #7
/* Allocates memory */
string* factor_vm::reallot_string(string* str_, cell capacity) {
  data_root<string> str(str_, this);

  if (reallot_string_in_place_p(str.untagged(), capacity)) {
    str->length = tag_fixnum(capacity);

    if (to_boolean(str->aux)) {
      byte_array* aux = untag<byte_array>(str->aux);
      aux->capacity = tag_fixnum(capacity * 2);
    }

    return str.untagged();
  } else {
    cell to_copy = string_capacity(str.untagged());
    if (capacity < to_copy)
      to_copy = capacity;

    data_root<string> new_str(allot_string_internal(capacity), this);

    memcpy(new_str->data(), str->data(), to_copy);

    if (to_boolean(str->aux)) {
      byte_array* new_aux = allot_uninitialized_array<byte_array>(capacity * 2);
      new_str->aux = tag<byte_array>(new_aux);
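      // record the slot store so a minor GC can find a potential old->new pointer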
      write_barrier(&new_str->aux);

      byte_array* aux = untag<byte_array>(str->aux);
      memcpy(new_aux->data<uint16_t>(), aux->data<uint16_t>(),
             to_copy * sizeof(uint16_t));
    }

    fill_string(new_str.untagged(), to_copy, capacity, '\0');
    return new_str.untagged();
  }
}
Example #8
void fiber_manager_yield(fiber_manager_t* manager)
{
    assert(fiber_manager_state == FIBER_MANAGER_STATE_STARTED);
    assert(manager);
    if(wsd_work_stealing_deque_size(manager->schedule_from) == 0) {
        wsd_work_stealing_deque_t* const temp = manager->schedule_from;
        manager->schedule_from = manager->store_to;
        manager->store_to = temp;
    }

    do {
        manager->yield_count += 1;
        //occasionally steal some work from threads with more load
        if((manager->yield_count & 1023) == 0) {
            fiber_load_balance(manager);
        }

        if(wsd_work_stealing_deque_size(manager->schedule_from) > 0) {
            fiber_t* const new_fiber = (fiber_t*)wsd_work_stealing_deque_pop_bottom(manager->schedule_from);
            if(new_fiber != WSD_EMPTY && new_fiber != WSD_ABORT) {
                fiber_t* const old_fiber = manager->current_fiber;
                if(old_fiber->state == FIBER_STATE_RUNNING) {
                    old_fiber->state = FIBER_STATE_READY;
                    manager->to_schedule = old_fiber;/* must schedule it *after* fiber_swap_context, else another thread can start executing an invalid context */
                }
                manager->current_fiber = new_fiber;
                new_fiber->state = FIBER_STATE_RUNNING;
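                //publish the fiber state updates before the context switch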
                write_barrier();
                fiber_swap_context(&old_fiber->context, &new_fiber->context);

                fiber_manager_do_maintenance();
            }
        }
    } while((manager = fiber_manager_get()) && FIBER_STATE_WAITING == manager->current_fiber->state && fiber_load_balance(manager));
}
Example #9
void factor_vm::set_string_nth_slow(string *str_, cell index, cell ch)
{
	data_root<string> str(str_,this);

	byte_array *aux;

	str->data()[index] = ((ch & 0x7f) | 0x80);

	if(to_boolean(str->aux))
		aux = untag<byte_array>(str->aux);
	else
	{
		/* We don't need to pre-initialize the
		byte array with any data, since we
		only ever read from the aux vector
		if the most significant bit of a
		character is set. Initially all of
		the bits are clear. */
		aux = allot_uninitialized_array<byte_array>(untag_fixnum(str->length) * sizeof(u16));

		str->aux = tag<byte_array>(aux);
		write_barrier(&str->aux);
	}

	aux->data<u16>()[index] = (u16)((ch >> 7) ^ 1);
}
Example #10
    void set_local(STATE, int pos, Object* val) {
      locals_[pos] = val;

      if(locals_ == heap_locals_) {
        write_barrier(state, val);
      }
    }
Example #11
inline void factor_vm::set_array_nth(array* array, cell slot, cell value) {
  FACTOR_ASSERT(slot < array_capacity(array));
  FACTOR_ASSERT(array->type() == ARRAY_TYPE);
  cell* slot_ptr = &array->data()[slot];
  *slot_ptr = value;
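  // mark the card containing slot_ptr so a minor GC can find a potential old->new pointer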
  write_barrier(slot_ptr);
}
Example #12
    void set_local(STATE, int pos, Object* val) {
      locals_[pos] = val;

      if(!stack_allocated_p()) {
        write_barrier(state, val);
      }
    }
Example #13
  Object* Regexp::match_region(STATE, String* string, Fixnum* start,
                               Fixnum* end, Object* forward)
  {
    int beg, max;
    const UChar *str;
    OnigRegion *region;
    Object* md;

    if(unlikely(!onig_data)) {
      Exception::argument_error(state, "Not properly initialized Regexp");
    }

    maybe_recompile(state);

    region = onig_region_new();

    max = string->size();
    str = (UChar*)string->c_str(state);

    int* back_match = onig_data->int_map_backward;

    if(!RTEST(forward)) {
      beg = onig_search(onig_data, str, str + max,
                        str + end->to_native(),
                        str + start->to_native(),
                        region, ONIG_OPTION_NONE);
    } else {
      beg = onig_search(onig_data, str, str + max,
                        str + start->to_native(),
                        str + end->to_native(),
                        region, ONIG_OPTION_NONE);
    }

    // Seems like onig must setup int_map_backward lazily, so we have to watch
    // for it to appear here.
    if(onig_data->int_map_backward != back_match) {
      native_int size = sizeof(int) * ONIG_CHAR_TABLE_SIZE;
      ByteArray* ba = ByteArray::create(state, size);
      memcpy(ba->raw_bytes(), onig_data->int_map_backward, size);

      // Dispose of the old one.
      free(onig_data->int_map_backward);

      onig_data->int_map_backward = reinterpret_cast<int*>(ba->raw_bytes());

      write_barrier(state, ba);
    }

    if(beg == ONIG_MISMATCH) {
      onig_region_free(region, 1);
      return Qnil;
    }

    md = get_match_data(state, region, string, this, 0);
    onig_region_free(region, 1);
    return md;
  }
Example #14
void factor_vm::primitive_set_slot() {
  fixnum slot = untag_fixnum(ctx->pop());
  object* obj = untag<object>(ctx->pop());
  cell value = ctx->pop();

  cell* slot_ptr = &obj->slots()[slot];
  *slot_ptr = value;
  write_barrier(slot_ptr);
}
Example #15
  Object* Tuple::put(STATE, native_int idx, Object* val) {
    if(idx < 0 || idx >= num_fields()) {
      rubinius::bug("Invalid tuple index");
    }

    field[idx] = val;
    write_barrier(state, val);
    return val;
  }
Example #16
void VMMethod::SetCachedFrame(VMFrame* frame) {
    cachedFrame = frame;
    if (frame != nullptr) {
        frame->SetContext(nullptr);
        frame->SetBytecodeIndex(0);
        frame->ResetStackPointer();
        write_barrier(this, cachedFrame);
    }
}
Example #17
  void Encoding::make_managed(STATE, const char* name, OnigEncodingType* enc) {
    ByteArray* enc_ba = ByteArray::create(state, sizeof(OnigEncodingType));
    memcpy(enc_ba->raw_bytes(), enc, sizeof(OnigEncodingType));

    encoding_ = reinterpret_cast<OnigEncodingType*>(enc_ba->raw_bytes());
    write_barrier(state, enc_ba);

    int size = strlen(name);
    if(size >= ENCODING_NAMELEN_MAX) size = ENCODING_NAMELEN_MAX-1;

    ByteArray* name_ba = ByteArray::create(state, size);
    memcpy(name_ba->raw_bytes(), name, size);
    name_ba->raw_bytes()[size] = 0;
    encoding_->name = reinterpret_cast<const char*>(name_ba->raw_bytes());
    write_barrier(state, name_ba);

    managed_ = true;
  }
Example #18
  Object* Tuple::put(STATE, size_t idx, Object* val) {
    if(num_fields() <= idx) {
      Exception::object_bounds_exceeded_error(state, this, idx);
    }

    this->field[idx] = val;
    if(val->reference_p()) write_barrier(state, val);
    return val;
  }
Example #19
/** Add endpoint to the list and queue.
 *
 * @param[in] instance List to use.
 * @param[in] endpoint Endpoint to add.
 *
 * The endpoint is added to the end of the list and queue.
 */
void endpoint_list_add_ep(endpoint_list_t *instance, ohci_endpoint_t *ep)
{
	assert(instance);
	assert(ep);
	usb_log_debug2("Queue %s: Adding endpoint(%p).\n", instance->name, ep);

	fibril_mutex_lock(&instance->guard);

	ed_t *last_ed = NULL;
	/* Add to the hardware queue. */
	if (list_empty(&instance->endpoint_list)) {
		/* There are no active EDs */
		last_ed = instance->list_head;
	} else {
		/* There are active EDs, get the last one */
		ohci_endpoint_t *last = list_get_instance(
		    list_last(&instance->endpoint_list), ohci_endpoint_t, link);
		last_ed = last->ed;
	}
	/* Keep link */
	ep->ed->next = last_ed->next;
	/* Make sure ED is written to the memory */
	write_barrier();

	/* Add ed to the hw queue */
	ed_append_ed(last_ed, ep->ed);
	/* Make sure ED is updated */
	write_barrier();

	/* Add to the sw list */
	list_append(&ep->link, &instance->endpoint_list);

	ohci_endpoint_t *first = list_get_instance(
	    list_first(&instance->endpoint_list), ohci_endpoint_t, link);
	usb_log_debug("HCD EP(%p) added to list %s, first is %p(%p).\n",
		ep, instance->name, first, first->ed);
	if (last_ed == instance->list_head) {
		usb_log_debug2("%s head ED(%p-0x%0" PRIx32 "): %x:%x:%x:%x.\n",
		    instance->name, last_ed, instance->list_head_pa,
		    last_ed->status, last_ed->td_tail, last_ed->td_head,
		    last_ed->next);
	}
	fibril_mutex_unlock(&instance->guard);
}
Example #20
//arrays.hpp
inline void factorvm::set_array_nth(array *array, cell slot, cell value)
{
#ifdef FACTOR_DEBUG
	assert(slot < array_capacity(array));
	assert(array->h.hi_tag() == ARRAY_TYPE);
	check_tagged_pointer(value);
#endif
	array->data()[slot] = value;
	write_barrier(array);
}
Example #21
  /* The Tuple#put primitive. */
  Object* Tuple::put_prim(STATE, Fixnum* index, Object* val) {
    native_int idx = index->to_native();

    if(idx < 0 || num_fields() <= idx) {
      return bounds_exceeded_error(state, "Tuple::put_prim", idx);
    }

    field[idx] = val;
    write_barrier(state, val);
    return val;
  }
Example #22
  Object* PackedObject::set_packed_ivar(STATE, Symbol* sym, Object* val) {
    LookupTable* tbl = this->reference_class()->packed_ivar_info();
    bool found = false;

    Fixnum* which = try_as<Fixnum>(tbl->fetch(state, sym, &found));
    if(!found) {
      return set_table_ivar(state, sym, val);
    }

    body_as_array()[which->to_native()] = val;
    if(val->reference_p()) write_barrier(state, val);
    return val;
  }
Example #23
int fiber_rwlock_init(fiber_rwlock_t* rwlock)
{
    assert(rwlock);
    if(!mpsc_fifo_init(&rwlock->write_waiters)
       || !mpsc_fifo_init(&rwlock->read_waiters)) {
        mpsc_fifo_destroy(&rwlock->write_waiters);
        mpsc_fifo_destroy(&rwlock->read_waiters);
        return FIBER_ERROR;
    }
    rwlock->state.blob = 0;
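    //ensure the initialized state is visible before the rwlock is shared with other threads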
    write_barrier();
    return FIBER_SUCCESS;
}
Example #24
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of TSOs.
 */
static bool
copyPart(StgClosure **p, StgClosure *src, uint32_t size_to_reserve,
         uint32_t size_to_copy, uint32_t gen_no)
{
    StgPtr to, from;
    uint32_t i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
#if defined(PROF_SPIN)
        whitehole_gc_spin++;
#endif
        busy_wait_nop();
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return false;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, gen_no);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

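    // make sure the copied closure contents are visible before we publish the forwarding pointer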
    write_barrier();
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
    *p = (StgClosure *)to;

#if defined(PROFILING)
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif

    return true;
}
Example #25
/*
 * It is up to the caller to fill in the object's fields in a meaningful
 * fashion!
 */
inline object *factorvm::allot_object(header header, cell size)
{
#ifdef GC_DEBUG
	if(!gc_off)
		gc();
#endif

	object *obj;

	if(nursery.size - allot_buffer_zone > size)
	{
		/* If there is insufficient room, collect the nursery */
		if(nursery.here + allot_buffer_zone + size > nursery.end)
			garbage_collection(data->nursery(),false,0);

		cell h = nursery.here;
		nursery.here = h + align8(size);
		obj = (object *)h;
	}
	/* If the object is bigger than the nursery, allocate it in
	tenured space */
	else
	{
		zone *tenured = &data->generations[data->tenured()];

		/* If tenured space does not have enough room, collect */
		if(tenured->here + size > tenured->end)
		{
			gc();
			tenured = &data->generations[data->tenured()];
		}

		/* If it still won't fit, grow the heap */
		if(tenured->here + size > tenured->end)
		{
			garbage_collection(data->tenured(),true,size);
			tenured = &data->generations[data->tenured()];
		}

		obj = allot_zone(tenured,size);

		/* Allows initialization code to store old->new pointers
		without hitting the write barrier in the common case of
		a nursery allocation */
		write_barrier(obj);
	}

	obj->h = header;
	return obj;
}
Example #26
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	read_barrier();
	if (next_tail == *ring->head)
		return FIO_Q_BUSY;

	/* ensure sqe stores are ordered with tail update */
	write_barrier();
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	*ring->tail = next_tail;
	write_barrier();

	ld->queued++;
	return FIO_Q_QUEUED;
}
Example #27
void verify_async_exit(struct thread_data *td)
{
	td->verify_thread_exit = 1;
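	/* make the exit flag visible before waking the verify threads */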
	write_barrier();
	pthread_cond_broadcast(&td->verify_cond);

	pthread_mutex_lock(&td->io_u_lock);

	while (td->nr_verify_threads)
		pthread_cond_wait(&td->free_cond, &td->io_u_lock);

	pthread_mutex_unlock(&td->io_u_lock);
	free(td->verify_threads);
	td->verify_threads = NULL;
}
Example #28
void wsd_work_stealing_deque_push_bottom(wsd_work_stealing_deque_t* d, void* p)
{
    assert(d);
    const int64_t b = d->bottom;
    const int64_t t = d->top;
    wsd_circular_array_t* a = d->underlying_array;
    const int64_t size = b - t;
    if(size >= a->size_minus_one) {
        /* top is actually < bottom. the circular array API expects start < end */
        a = wsd_circular_array_grow(a, t, b);
        /* NOTE: d->underlying_array is lost. memory leak. */
        d->underlying_array = a;
    }
    wsd_circular_array_put(a, b, p);
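    /* the element store must be visible before the new bottom is published to stealers */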
    write_barrier();
    d->bottom = b + 1;
}
Example #29
int fiber_event_init()
{
    fiber_spinlock_lock(&fiber_loop_spinlock);

    assert("libev version mismatch" && ev_version_major () == EV_VERSION_MAJOR && ev_version_minor () >= EV_VERSION_MINOR);

    active_threads = fiber_manager_get_kernel_thread_count();

    write_barrier();//needed so active_threads is set before fiber_loop (see fiber_poll_events_blocking - active_threads should never be decremented before it's been set)

    fiber_loop = ev_loop_new(EVFLAG_AUTO);
    assert(fiber_loop);

    fiber_spinlock_unlock(&fiber_loop_spinlock);

    return fiber_loop ? FIBER_SUCCESS : FIBER_ERROR;
}
Example #30
STATIC_INLINE StgInd *
lockCAF (StgRegTable *reg, StgIndStatic *caf)
{
    const StgInfoTable *orig_info;
    Capability *cap = regTableToCapability(reg);
    StgInd *bh;

    orig_info = caf->header.info;

#ifdef THREADED_RTS
    const StgInfoTable *cur_info;

    if (orig_info == &stg_IND_STATIC_info ||
        orig_info == &stg_WHITEHOLE_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    cur_info = (const StgInfoTable *)
        cas((StgVolatilePtr)&caf->header.info,
            (StgWord)orig_info,
            (StgWord)&stg_WHITEHOLE_info);

    if (cur_info != orig_info) {
        // already claimed by another thread; re-enter the CAF
        return NULL;
    }

    // successfully claimed by us; overwrite with IND_STATIC
#endif

    // For the benefit of revertCAFs(), save the original info pointer
    caf->saved_info = orig_info;

    // Allocate the blackhole indirection closure
    bh = (StgInd *)allocate(cap, sizeofW(*bh));
    SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
    bh->indirectee = (StgClosure *)cap->r.rCurrentTSO;

    caf->indirectee = (StgClosure *)bh;
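    // ensure the blackhole indirection is visible before the info pointer flips to IND_STATIC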
    write_barrier();
    SET_INFO((StgClosure*)caf,&stg_IND_STATIC_info);

    return bh;
}