Example #1
bool expr_eq_fn::apply(expr const & a, expr const & b) {
    if (is_eqp(a, b))          return true;
    if (a.hash() != b.hash())  return false;
    if (a.kind() != b.kind())  return false;
    if (is_var(a))             return var_idx(a) == var_idx(b);
    if (m_counter >= LEAN_EQ_CACHE_THRESHOLD && is_shared(a) && is_shared(b)) {
        auto p = std::make_pair(a.raw(), b.raw());
        if (!m_eq_visited)
            m_eq_visited.reset(new expr_cell_pair_set);
        if (m_eq_visited->find(p) != m_eq_visited->end())
            return true;
        m_eq_visited->insert(p);
    }
    check_system("expression equality test");
    switch (a.kind()) {
    case expr_kind::Var:
        lean_unreachable(); // LCOV_EXCL_LINE
    case expr_kind::Constant:
        return
            const_name(a) == const_name(b) &&
            compare(const_levels(a), const_levels(b), [](level const & l1, level const & l2) { return l1 == l2; });
    case expr_kind::Local: case expr_kind::Meta:
        return
            mlocal_name(a) == mlocal_name(b) &&
            apply(mlocal_type(a), mlocal_type(b));
    case expr_kind::App:
        m_counter++;
        return
            apply(app_fn(a), app_fn(b)) &&
            apply(app_arg(a), app_arg(b));
    case expr_kind::Lambda: case expr_kind::Pi:
        m_counter++;
        return
            apply(binding_domain(a), binding_domain(b)) &&
            apply(binding_body(a), binding_body(b)) &&
            (!m_compare_binder_info || binding_info(a) == binding_info(b));
    case expr_kind::Sort:
        return sort_level(a) == sort_level(b);
    case expr_kind::Macro:
        m_counter++;
        if (macro_def(a) != macro_def(b) || macro_num_args(a) != macro_num_args(b))
            return false;
        for (unsigned i = 0; i < macro_num_args(a); i++) {
            if (!apply(macro_arg(a, i), macro_arg(b, i)))
                return false;
        }
        return true;
    case expr_kind::Let:
        m_counter++;
        return
            apply(let_type(a), let_type(b)) &&
            apply(let_value(a), let_value(b)) &&
            apply(let_body(a), let_body(b));
    }
    lean_unreachable(); // LCOV_EXCL_LINE
}
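
Note: the pair cache is the interesting part of this equality test. Once
m_counter passes LEAN_EQ_CACHE_THRESHOLD, pairs of shared subterms that are
already being compared are skipped, which keeps the test roughly linear on
DAG-shaped terms instead of exponential. A minimal standalone sketch of the
same idea, using a hypothetical refcounted Node type rather than Lean's
actual expr:

#include <functional>
#include <unordered_set>
#include <utility>

// Hypothetical refcounted DAG node; is_shared() mirrors the notion of a
// subterm referenced from more than one place.
struct Node {
    int      tag;            // payload; comparable across nodes
    Node *   lhs = nullptr;  // children (null for leaves)
    Node *   rhs = nullptr;
    unsigned rc  = 1;        // reference count, maintained elsewhere
};

inline bool is_shared(Node const * n) { return n->rc > 1; }

struct pair_hash {
    size_t operator()(std::pair<Node const *, Node const *> const & p) const {
        return std::hash<Node const *>()(p.first) * 31 +
               std::hash<Node const *>()(p.second);
    }
};
using visited_set =
    std::unordered_set<std::pair<Node const *, Node const *>, pair_hash>;

bool eq(Node const * a, Node const * b, visited_set & visited) {
    if (a == b)           return true;    // physical equality, as is_eqp above
    if (a->tag != b->tag) return false;
    // Only cache pairs where both sides are shared: an unshared node is
    // reached exactly once, so caching it would be pure overhead.
    if (is_shared(a) && is_shared(b) &&
        !visited.insert(std::make_pair(a, b)).second)
        return true;      // this pair is already being (or has been) compared
    if (!a->lhs) return !b->lhs;          // both must be leaves
    return b->lhs &&
           eq(a->lhs, b->lhs, visited) &&
           eq(a->rhs, b->rhs, visited);
}
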
Example #2
 bool check(expr const & a, expr const & b) {
     if (!is_shared(a) || !is_shared(b))
         return false;
     unsigned i = hash(a.hash_alloc(), b.hash_alloc()) % m_capacity;
     if (m_cache[i].m_a == a.raw() && m_cache[i].m_b == b.raw()) {
         return true;
     } else {
         if (m_cache[i].m_a == nullptr)
             m_used.push_back(i);
         m_cache[i].m_a = a.raw();
         m_cache[i].m_b = b.raw();
         return false;
     }
 }
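
Note: check() above is a cheaper cousin of such a pair set: a fixed-capacity,
direct-mapped table that simply evicts on collision. Forgetting a pair only
costs repeated work, never a wrong answer, and the m_used vector lets the
table be reset by clearing just the touched slots. A standalone sketch under
those assumptions (hypothetical layout, not Z3's actual structure):

#include <cstddef>
#include <functional>
#include <vector>

class pair_cache {
    struct entry { void const * m_a = nullptr; void const * m_b = nullptr; };
    std::vector<entry>  m_cache;
    std::vector<size_t> m_used;   // touched slots, for cheap reset
public:
    explicit pair_cache(size_t capacity) : m_cache(capacity) {}  // capacity > 0

    // Returns true iff (a, b) is still resident; records it otherwise.
    bool check(void const * a, void const * b) {
        size_t i = (std::hash<void const *>()(a) * 31 +
                    std::hash<void const *>()(b)) % m_cache.size();
        if (m_cache[i].m_a == a && m_cache[i].m_b == b)
            return true;
        if (m_cache[i].m_a == nullptr)
            m_used.push_back(i);          // first time this slot is touched
        m_cache[i].m_a = a;               // evict whatever was there
        m_cache[i].m_b = b;
        return false;
    }

    void reset() {                        // clear only the touched slots
        for (size_t i : m_used)
            m_cache[i] = entry();
        m_used.clear();
    }
};
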
 bool is_shared_eq(expr * t, expr * & lhs, expr * & value) {
     expr* arg1, *arg2;
     if (!m.is_eq(t, arg1, arg2))
         return false;
     if (m.is_value(arg1) && is_shared(arg2)) {
         lhs   = arg2;
         value = arg1;
         return true;
     }
     if (m.is_value(arg2) && is_shared(arg1)) {
         lhs   = arg1;
         value = arg2;
         return true;
     }
     return false;
 }
expr replace_visitor::visit(expr const & e) {
    check_system("expression replacer");
    bool shared = false;
    if (is_shared(e)) {
        shared = true;
        auto it = m_cache.find(e);
        if (it != m_cache.end())
            return it->second;
    }

    switch (e.kind()) {
    case expr_kind::Sort:      return save_result(e, visit_sort(e), shared);
    case expr_kind::Macro:     return save_result(e, visit_macro(e), shared);
    case expr_kind::Constant:  return save_result(e, visit_constant(e), shared);
    case expr_kind::Var:       return save_result(e, visit_var(e), shared);
    case expr_kind::Meta:      return save_result(e, visit_meta(e), shared);
    case expr_kind::Local:     return save_result(e, visit_local(e), shared);
    case expr_kind::App:       return save_result(e, visit_app(e), shared);
    case expr_kind::Lambda:    return save_result(e, visit_lambda(e), shared);
    case expr_kind::Pi:        return save_result(e, visit_pi(e), shared);
    case expr_kind::Let:       return save_result(e, visit_let(e), shared);
    }

    lean_unreachable(); // LCOV_EXCL_LINE
}
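
Note: visit() caches results only for shared expressions; an unshared node is
reached exactly once, so caching it would be wasted work. A sketch of this
memoized-visitor pattern, reusing the hypothetical Node type and is_shared()
helper from the note after Example #1:

#include <unordered_map>

// Rebuilds a DAG bottom-up, memoizing results for shared nodes only,
// mirroring the save_result(e, ..., shared) calls above.
Node * transform(Node * n, std::unordered_map<Node *, Node *> & cache) {
    bool shared = is_shared(n);
    if (shared) {
        auto it = cache.find(n);
        if (it != cache.end())
            return it->second;            // this subterm was already rewritten
    }
    Node * result = n;
    if (n->lhs) {                         // interior node: visit the children
        Node * l = transform(n->lhs, cache);
        Node * r = transform(n->rhs, cache);
        if (l != n->lhs || r != n->rhs)
            result = new Node{n->tag, l, r, 1};  // rebuild only on change
    }
    if (shared)
        cache[n] = result;
    return result;
}
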
Example #5
ciField::ciField(fieldDescriptor *fd): _known_to_link_with(NULL) {
  ASSERT_IN_VM;

  _cp_index = -1;

  // Get the field's name, signature, and type.
  ciEnv* env = CURRENT_ENV;
  _name = env->get_object(fd->name())->as_symbol();
  _signature = env->get_object(fd->signature())->as_symbol();

  BasicType field_type = fd->field_type();

  // If the field is a pointer type, get the klass of the
  // field.
  if (field_type == T_OBJECT || field_type == T_ARRAY) {
    _type = NULL;  // must call compute_type on first access
  } else {
    _type = ciType::make(field_type);
  }

  initialize_from(fd);

  // Either (a) it is marked shared, or else (b) we are done bootstrapping.
  assert(is_shared() || ciObjectFactory::is_initialized(),
         "bootstrap classes must not create & cache unshared fields");
}
Example #6
        operator boost::asio::mutable_buffer() {
            if (is_shared())
                deep_copy();

            auto pv = zmq_msg_data(const_cast<zmq_msg_t*>(&msg_));
            return boost::asio::buffer(pv, size());
        }
 void push_result(expr * new_curr, proof * new_pr) {
     if (m_goal->proofs_enabled()) {
         proof * pr = m_goal->pr(m_idx);
         new_pr     = m.mk_modus_ponens(pr, new_pr);
     }
     
     expr_dependency_ref new_d(m);
     if (m_goal->unsat_core_enabled()) {
         new_d = m_goal->dep(m_idx);
         expr_dependency * used_d = m_r.get_used_dependencies();
         if (used_d != nullptr) {
             new_d = m.mk_join(new_d, used_d);
             m_r.reset_used_dependencies();
         }
     }
     
     m_goal->update(m_idx, new_curr, new_pr, new_d);
 
     if (is_shared(new_curr)) {
         m_subst->insert(new_curr, m.mk_true(), m.mk_iff_true(new_pr), new_d);
     }
     expr * atom;
     if (is_shared_neg(new_curr, atom)) {
         m_subst->insert(atom, m.mk_false(), m.mk_iff_false(new_pr), new_d);
     }
     expr * lhs, * value;
     if (is_shared_eq(new_curr, lhs, value)) {
         TRACE("shallow_context_simplifier_bug", tout << "found eq:\n" << mk_ismt2_pp(new_curr, m) << "\n";);
         m_subst->insert(lhs, value, new_pr, new_d);
     }
 }
Example #8
int BC_Bitmap::write_drawable(Drawable &pixmap, GC &gc,
		int source_x, int source_y, int source_w, int source_h,
		int dest_x, int dest_y, int dest_w, int dest_h, 
		int dont_wait)
{
//printf("BC_Bitmap::write_drawable 1 %p %d\n", this, current_ringbuffer); fflush(stdout);
	//if( dont_wait ) XSync(top_level->display, False);
	BC_BitmapImage *bfr = cur_bfr();
	if( !bfr->is_zombie() && is_shared() && shm_reply ) {
//printf("activate %p %08lx\n",bfr,bfr->get_shmseg());
		top_level->active_bitmaps.insert(bfr, pixmap);
		if( ++active_buffers > max_active_buffers )
			max_active_buffers = active_buffers;
	}
	bfr->write_drawable(pixmap, gc,
		source_x, source_y, source_w, source_h,
		dest_x, dest_y, dest_w, dest_h);
	XFlush(top_level->display);
	avail_lock->lock(" BC_Bitmap::write_drawable");
	if( bfr->is_zombie() ) {
		delete bfr;
		--zombies;
	}
	else if( is_unshared() || !shm_reply )
		avail.append(bfr);
	active_bfr = 0;
	avail_lock->unlock();
	last_pixmap = pixmap;
	last_pixmap_used = 1;
	if( !dont_wait && !shm_reply )
		XSync(top_level->display, False);
	return 0;
}
 bool is_shared_eq(expr * t, expr * & lhs, expr * & value) {
     if (!m().is_eq(t))
         return false;
     expr * arg1 = to_app(t)->get_arg(0);
     expr * arg2 = to_app(t)->get_arg(1);
     if (m().is_value(arg1) && is_shared(arg2)) {
         lhs   = arg2;
         value = arg1;
         return true;
     }
     if (m().is_value(arg2) && is_shared(arg1)) {
         lhs   = arg1;
         value = arg2;
         return true;
     }
     return false;
 }
Example #10
Dictionary Dictionary::copy() const {

	Dictionary n(is_shared());

	List<Variant> keys;
	get_key_list(&keys);

	for(List<Variant>::Element *E=keys.front();E;E=E->next()) {
		n[E->get()]=operator[](E->get());
	}

	return n;
}
Example #11
void 
data_type<T>::free() 
{
    if (!is_default())
    { 
	if (is_shared())
	    copy_default();
	else
	{
	    m_repository.free(m_ix); 
	    load_default();
	}
    }
}
Example #12
static void update_private(
    LABYRINTH labyrinth, CELL walls[], int *last_private, int *first_shared
)
{
    for (int i = 0; i <= *last_private; i++) {
        CELL wall = walls[i];
        CELL index = wall & GROUP_MASK;
        WALL orientation = wall & WALLS_MASK;
        CELL *cell1 = labyrinth + index;
        CELL *cell2 = cell_fellow(cell1, orientation);

        CELL *root1 = cell_root(labyrinth, cell1)
           , *root2 = cell_root(labyrinth, cell2);

        if (root1 == root2) // The wall can no longer be removed.
            walls[i--] = walls[(*last_private)--];
        else if (is_shared(*root1) && is_shared(*root2)) {
            // Move the wall into the shared walls.
            walls[--(*first_shared)] = walls[i];
            walls[i--] = walls[(*last_private)--];
        }
    }
}
Example #13
void 
data_type<T>::free( 
    ix_type a_ix
    ) 
{
    if (a_ix!=get_default_ix())
    {
	if (is_shared())
	    copy_default();
	else
	{
	    m_repository.free(a_ix); 
	    if (m_ix==a_ix) load_default();
	}
    }
}
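
Note: both free() variants follow the same copy-on-write discipline: a slot
that other handles still reference is only detached from (the handle falls
back to the default value), while an exclusively owned slot is returned to
the repository. A standalone sketch of that discipline with a hypothetical
refcounted slot pool (the real m_repository interface is not shown in these
snippets):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical refcounted pool standing in for m_repository.
struct slot_pool {
    std::vector<int>      m_values;
    std::vector<unsigned> m_refs;
    std::vector<size_t>   m_free_list;

    size_t alloc(int v) {
        if (!m_free_list.empty()) {
            size_t ix = m_free_list.back();
            m_free_list.pop_back();
            m_values[ix] = v;
            m_refs[ix]   = 1;
            return ix;
        }
        m_values.push_back(v);
        m_refs.push_back(1);
        return m_values.size() - 1;
    }
    void retain(size_t ix) { ++m_refs[ix]; }
    void release(size_t ix) {
        assert(m_refs[ix] > 0);
        if (--m_refs[ix] == 0)            // only the last owner reclaims
            m_free_list.push_back(ix);
    }
};

struct handle {
    slot_pool * m_pool;
    size_t      m_ix;                 // slot 0 is assumed to hold the default

    void free() {
        if (m_ix == 0) return;        // already the default: nothing to do
        m_pool->release(m_ix);        // shared: detach; exclusive: reclaim
        m_ix = 0;                     // the load_default() step
        m_pool->retain(0);
    }
};

With an explicit reference count the shared/unshared branch collapses into
release(); the snippets above branch by hand because copy_default() and
m_repository.free() are distinct operations.
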
// ------------------------------------------------------------------
// ciInstanceKlass::ciInstanceKlass
//
// Loaded instance klass.
ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
  ciKlass(h_k)
{
  assert(get_Klass()->oop_is_instance(), "wrong type");
  assert(get_instanceKlass()->is_loaded(), "must be at least loaded");
  InstanceKlass* ik = get_instanceKlass();

  AccessFlags access_flags = ik->access_flags();
  _flags = ciFlags(access_flags);
  _has_finalizer = access_flags.has_finalizer();
  _has_subklass = ik->subklass() != NULL;
  _init_state = ik->init_state();
  _nonstatic_field_size = ik->nonstatic_field_size();
  _has_nonstatic_fields = ik->has_nonstatic_fields();
  _has_default_methods = ik->has_default_methods();
  _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
  _has_injected_fields = -1;
  _implementor = NULL; // we will fill these lazily

  Thread *thread = Thread::current();
  if (ciObjectFactory::is_initialized()) {
    _loader = JNIHandles::make_local(thread, ik->class_loader());
    _protection_domain = JNIHandles::make_local(thread,
                                                ik->protection_domain());
    _is_shared = false;
  } else {
    Handle h_loader(thread, ik->class_loader());
    Handle h_protection_domain(thread, ik->protection_domain());
    _loader = JNIHandles::make_global(h_loader);
    _protection_domain = JNIHandles::make_global(h_protection_domain);
    _is_shared = true;
  }

  // Lazy fields get filled in only upon request.
  _super  = NULL;
  _java_mirror = NULL;

  if (is_shared()) {
    if (h_k() != SystemDictionary::Object_klass()) {
      super();
    }
    //compute_nonstatic_fields();  // done outside of constructor
  }

  _field_cache = NULL;
}
/*
 * Check to see if the filesystem is currently shared.
 */
zfs_share_type_t
zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
{
	char *mountpoint;
	zfs_share_type_t rc;

	if (!zfs_is_mounted(zhp, &mountpoint))
		return (SHARED_NOT_SHARED);

	if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto))) {
		if (where != NULL)
			*where = mountpoint;
		else
			free(mountpoint);
		return (rc);
	} else {
		free(mountpoint);
		return (SHARED_NOT_SHARED);
	}
}
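
Note: a hypothetical call site for the function above (assumes <libzfs.h>,
<stdio.h>, and <stdlib.h>, plus an open dataset handle; PROTO_NFS is one of
the zfs_share_proto_t values):

/* Report where a dataset is shared over NFS. */
static void
report_nfs_share(zfs_handle_t *zhp)
{
	char *where = NULL;

	if (zfs_is_shared_proto(zhp, &where, PROTO_NFS) != SHARED_NOT_SHARED) {
		(void) printf("%s is shared at %s\n", zfs_get_name(zhp), where);
		free(where);	/* on success the caller owns the mountpoint */
	}
}
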
Example #16
/*
 * Unshare the given filesystem.
 */
int
zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
    zfs_share_proto_t *proto)
{
	struct mnttab search = { 0 }, entry;
	char *mntpt = NULL;

	/* check to see if need to unmount the filesystem */
	search.mnt_special = (char *)zfs_get_name(zhp);
	search.mnt_fstype = MNTTYPE_ZFS;

#ifndef __APPLE__
	rewind(zhp->zfs_hdl->libzfs_mnttab);
#endif /*!__APPLE__*/
	if (mountpoint != NULL)
		mntpt = zfs_strdup(zhp->zfs_hdl, mountpoint);

	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
	    getmntany(zhp->zfs_hdl->libzfs_mnttab, &entry, &search) == 0)) {
		zfs_share_proto_t *curr_proto;

		if (mountpoint == NULL)
			mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);

		for (curr_proto = proto; *curr_proto != PROTO_END;
		    curr_proto++) {

			if (is_shared(zhp->zfs_hdl, mntpt, *curr_proto) &&
			    unshare_one(zhp->zfs_hdl, zhp->zfs_name,
			    mntpt, *curr_proto) != 0) {
				if (mntpt != NULL)
					free(mntpt);
				return (-1);
			}
		}
	}
	if (mntpt != NULL)
		free(mntpt);

	return (0);
}
Example #17
static inline
struct vm_area_struct* find_next_vma(long pid, struct mm_struct *mm, struct vm_area_struct* prev_vma)
{
	struct vm_area_struct *tmp = prev_vma;
	int i = 0;

	while (1) {

		if (!tmp) {
			if (++i>2)
				return NULL;
			proc[pid].num_walks++;
			tmp = mm->mmap;
		} else {
			tmp = tmp->vm_next;
		}

		if (tmp && (!spcd_vma_shared_flag || is_shared(tmp)))
			return tmp;
	}
}
Example #18
    expr apply(expr const & e, unsigned offset) {
        bool shared = false;
        if (m_use_cache && is_shared(e)) {
            if (auto r = m_cache->find(e, offset))
                return *r;
            shared = true;
        }
        check_interrupted();
        check_memory("replace");

        if (optional<expr> r = m_f(e, offset)) {
            return save_result(e, offset, *r, shared);
        } else {
            switch (e.kind()) {
            case expr_kind::Constant: case expr_kind::Sort: case expr_kind::Var:
                return save_result(e, offset, e, shared);
            case expr_kind::Meta:     case expr_kind::Local: {
                expr new_t = apply(mlocal_type(e), offset);
                return save_result(e, offset, update_mlocal(e, new_t), shared);
            }
            case expr_kind::App: {
                expr new_f = apply(app_fn(e), offset);
                expr new_a = apply(app_arg(e), offset);
                return save_result(e, offset, update_app(e, new_f, new_a), shared);
            }
            case expr_kind::Pi: case expr_kind::Lambda: {
                expr new_d = apply(binding_domain(e), offset);
                expr new_b = apply(binding_body(e), offset+1);
                return save_result(e, offset, update_binding(e, new_d, new_b), shared);
            }
            case expr_kind::Macro: {
                buffer<expr> new_args;
                unsigned nargs = macro_num_args(e);
                for (unsigned i = 0; i < nargs; i++)
                    new_args.push_back(apply(macro_arg(e, i), offset));
                return save_result(e, offset, update_macro(e, new_args.size(), new_args.data()), shared);
            }}
            lean_unreachable();
        }
    }
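
Note: the cache in this replacer is keyed on the pair (expr, offset), not on
the expression alone: binding_body is visited at offset+1, so a shared
subterm reached under different numbers of binders can legitimately map to
different results. A minimal illustration of such a key (hypothetical types,
not Lean's actual cache):

#include <cstddef>
#include <functional>
#include <unordered_map>
#include <utility>

// Cache key: node identity plus the binder depth it was visited at.
using replace_key = std::pair<void const *, unsigned>;

struct replace_key_hash {
    size_t operator()(replace_key const & k) const {
        return std::hash<void const *>()(k.first) * 31 + k.second;
    }
};

// The same shared subterm visited at two different offsets gets two
// distinct entries, so a hit is only reused at the same depth.
template <typename Result>
using replace_cache = std::unordered_map<replace_key, Result, replace_key_hash>;
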
Example #19
/*
 * Unshare the given filesystem.
 */
int
zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
    zfs_share_proto_t *proto)
{
	libzfs_handle_t *hdl = zhp->zfs_hdl;
	struct mnttab entry;
	char *mntpt = NULL;

	/* check to see if need to unmount the filesystem */
	rewind(zhp->zfs_hdl->libzfs_mnttab);
	if (mountpoint != NULL)
		mountpoint = mntpt = zfs_strdup(hdl, mountpoint);

	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
	    libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
		zfs_share_proto_t *curr_proto;

		if (mountpoint == NULL)
			mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);

		for (curr_proto = proto; *curr_proto != PROTO_END;
		    curr_proto++) {

			while (is_shared(hdl, mntpt, *curr_proto)) {
				if (unshare_one(hdl, zhp->zfs_name,
				    mntpt, *curr_proto) != 0) {
					if (mntpt != NULL)
						free(mntpt);
					return (-1);
				}
			}
		}
	}
	if (mntpt != NULL)
		free(mntpt);

	return (0);
}
        void io_looper_task_queue::enqueue(task* task)
        {
            // put into locked queue when it is shared or from remote threads
            if (is_shared() || task::get_current_worker() != this->owner_worker())
            {
                {
                    utils::auto_lock<::dsn::utils::ex_lock_nr_spin> l(_lock);
                    _remote_tasks.add(task);
                }

                int old = _remote_count.fetch_add(1, std::memory_order_release);
                if (old == 0)
                {
                    notify_local_execution();
                }
            }

            // put into local queue
            else
            {
                _local_tasks.add(task);
            }
        }
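
Note: the notification handshake in enqueue() is worth isolating: only the
enqueue that moves _remote_count from 0 to 1 calls notify_local_execution(),
so a burst of cross-thread enqueues pays for a single wake-up. A standalone
sketch of that handshake (hypothetical queue and notify hook; the real dsn
types differ):

#include <atomic>
#include <deque>
#include <mutex>

struct remote_queue {
    std::mutex       m_lock;
    std::deque<int>  m_tasks;     // stand-in for the task list
    std::atomic<int> m_count{0};

    void enqueue(int task, void (*notify)()) {
        {
            std::lock_guard<std::mutex> g(m_lock);
            m_tasks.push_back(task);
        }
        // Only the 0 -> 1 transition triggers a wake-up; later enqueues
        // just bump the counter until the consumer drains.
        if (m_count.fetch_add(1, std::memory_order_release) == 0)
            notify();
    }

    // Consumer side: take everything, then subtract what was taken. If a
    // producer raced in meanwhile, m_count stays above zero and the next
    // 0 -> 1 transition will notify again.
    size_t drain(std::deque<int> & out) {
        {
            std::lock_guard<std::mutex> g(m_lock);
            out.swap(m_tasks);    // out is assumed empty on entry
        }
        m_count.fetch_sub(static_cast<int>(out.size()),
                          std::memory_order_acquire);
        return out.size();
    }
};
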
Example #21
 expr apply(expr const & a) {
     bool sh = false;
     if (is_shared(a)) {
         auto r = m_cache.find(a.raw());
         if (r != m_cache.end())
             return r->second;
         sh = true;
     }
     switch (a.kind()) {
     case expr_kind::Var: case expr_kind::Constant: case expr_kind::Type: case expr_kind::Value:
         return save_result(a, copy(a), sh);
     case expr_kind::App: {
         buffer<expr> new_args;
         for (expr const & old_arg : args(a))
             new_args.push_back(apply(old_arg));
         return save_result(a, mk_app(new_args), sh);
     }
     case expr_kind::HEq:      return save_result(a, mk_heq(apply(heq_lhs(a)), apply(heq_rhs(a))), sh);
     case expr_kind::Pair:     return save_result(a, mk_pair(apply(pair_first(a)), apply(pair_second(a)), apply(pair_type(a))), sh);
     case expr_kind::Proj:     return save_result(a, mk_proj(proj_first(a), apply(proj_arg(a))), sh);
     case expr_kind::Lambda:   return save_result(a, mk_lambda(abst_name(a), apply(abst_domain(a)), apply(abst_body(a))), sh);
     case expr_kind::Pi:       return save_result(a, mk_pi(abst_name(a), apply(abst_domain(a)), apply(abst_body(a))), sh);
     case expr_kind::Sigma:    return save_result(a, mk_sigma(abst_name(a), apply(abst_domain(a)), apply(abst_body(a))), sh);
     case expr_kind::Let:      return save_result(a, mk_let(let_name(a), apply(let_type(a)), apply(let_value(a)), apply(let_body(a))), sh);
     case expr_kind::MetaVar:
         return save_result(a,
                            update_metavar(a, [&](local_entry const & e) -> local_entry {
                                    if (e.is_inst())
                                        return mk_inst(e.s(), apply(e.v()));
                                    else
                                        return e;
                                }),
                            sh);
     }
     lean_unreachable(); // LCOV_EXCL_LINE
 }
 bool is_shared_neg(expr * t, expr * & atom) {            
     if (!m.is_not(t, atom))
         return false;
     return is_shared(atom);
 }
Example #23
        boost::asio::mutable_buffer buffer() {
            if (is_shared())
                deep_copy();

            return boost::asio::buffer(const_cast<void *>(data()), size());
        }
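
Note: both conversion operators above apply the same copy-on-write guard:
before exposing a writable view, a message whose payload is shared is
deep-copied so the caller cannot mutate storage that other handles still
read. A standalone sketch of the guard with a hypothetical refcounted
payload (not zmq's actual message internals):

#include <vector>

struct cow_buffer {
    struct payload { std::vector<char> bytes; unsigned rc; };
    payload * m_p;

    bool is_shared() const { return m_p->rc > 1; }

    void deep_copy() {
        payload * fresh = new payload{m_p->bytes, 1};  // private copy
        --m_p->rc;   // detach; only called while shared, so rc stays > 0
        m_p = fresh;
    }

    // Hand out a writable view only over exclusively owned storage,
    // exactly as operator mutable_buffer() does before building the view.
    char * mutable_data() {
        if (is_shared())
            deep_copy();
        return m_p->bytes.data();
    }
};
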
/*
 * Unshare and unmount all datasets within the given pool.  We don't want to
 * rely on traversing the DSL to discover the filesystems within the pool,
 * because this may be expensive (if not all of them are mounted), and can fail
 * arbitrarily (on I/O error, for example).  Instead, we walk /etc/mtab and
 * gather all the filesystems that are currently mounted.
 */
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
	int used, alloc;
	struct mnttab entry;
	size_t namelen;
	char **mountpoints = NULL;
	zfs_handle_t **datasets = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int i;
	int ret = -1;
	int flags = (force ? MS_FORCE : 0);

	namelen = strlen(zhp->zpool_name);

	rewind(hdl->libzfs_mnttab);
	used = alloc = 0;
	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
		/*
		 * Ignore non-ZFS entries.
		 */
		if (entry.mnt_fstype == NULL ||
		    strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
			continue;

		/*
		 * Ignore filesystems not within this pool.
		 */
		if (entry.mnt_mountp == NULL ||
		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
		    (entry.mnt_special[namelen] != '/' &&
		    entry.mnt_special[namelen] != '\0'))
			continue;

		/*
		 * At this point we've found a filesystem within our pool.  Add
		 * it to our growing list.
		 */
		if (used == alloc) {
			if (alloc == 0) {
				if ((mountpoints = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				if ((datasets = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				alloc = 8;
			} else {
				void *ptr;

				if ((ptr = zfs_realloc(hdl, mountpoints,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				mountpoints = ptr;

				if ((ptr = zfs_realloc(hdl, datasets,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				datasets = ptr;

				alloc *= 2;
			}
		}

		if ((mountpoints[used] = zfs_strdup(hdl,
		    entry.mnt_mountp)) == NULL)
			goto out;

		/*
		 * This is allowed to fail, in case there is some I/O error.  It
		 * is only used to determine if we need to remove the underlying
		 * mountpoint, so failure is not fatal.
		 */
		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);

		used++;
	}

	/*
	 * At this point, we have the entire list of filesystems, so sort it by
	 * mountpoint.
	 */
	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);

	/*
	 * Walk through and first unshare everything.
	 */
	for (i = 0; i < used; i++) {
		zfs_share_proto_t *curr_proto;
		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
		    curr_proto++) {
			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
			    unshare_one(hdl, mountpoints[i],
			    mountpoints[i], *curr_proto) != 0)
				goto out;
		}
	}

	/*
	 * Now unmount everything, removing the underlying directories as
	 * appropriate.
	 */
	for (i = 0; i < used; i++) {
		if (unmount_one(hdl, mountpoints[i], flags) != 0)
			goto out;
	}

	for (i = 0; i < used; i++) {
		if (datasets[i])
			remove_mountpoint(datasets[i]);
	}

	ret = 0;
out:
	for (i = 0; i < used; i++) {
		if (datasets[i])
			zfs_close(datasets[i]);
		free(mountpoints[i]);
	}
	free(datasets);
	free(mountpoints);

	return (ret);
}
Example #25
 bool is_shared_neg(expr * t, expr * & atom) {
     if (!m().is_not(t))
         return false;
     atom = to_app(t)->get_arg(0);
     return is_shared(atom);
 }
Example #26
/*
 * Unshare and unmount all datasets within the given pool.  We don't want to
 * rely on traversing the DSL to discover the filesystems within the pool,
 * because this may be expensive (if not all of them are mounted), and can fail
 * arbitrarily (on I/O error, for example).  Instead, we walk /etc/mtab and
 * gather all the filesystems that are currently mounted.
 */
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
	int used, alloc;
	struct mnttab entry;
	size_t namelen;
	char **mountpoints = NULL;
	zfs_handle_t **datasets = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	int i;
	int ret = -1;
	int flags = (force ? MS_FORCE : 0);

	namelen = strlen(zhp->zpool_name);

	/* Reopen MNTTAB to prevent reading stale data from open file */
	if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
		return (ENOENT);

	used = alloc = 0;
	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
		/*
		 * Ignore filesystems not within this pool.
		 */
		if (entry.mnt_fstype == NULL ||
		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
		    (entry.mnt_special[namelen] != '/' &&
#ifdef __APPLE__
		    /*
		     * On OS X, '@' is possible too since we're temporarily
		     * allowing manual snapshot mounting.
		     */
		    entry.mnt_special[namelen] != '@' &&
#endif /* __APPLE__ */
		    entry.mnt_special[namelen] != '\0'))
			continue;

		/*
		 * At this point we've found a filesystem within our pool.  Add
		 * it to our growing list.
		 */
		if (used == alloc) {
			if (alloc == 0) {
				if ((mountpoints = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				if ((datasets = zfs_alloc(hdl,
				    8 * sizeof (void *))) == NULL)
					goto out;

				alloc = 8;
			} else {
				void *ptr;

				if ((ptr = zfs_realloc(hdl, mountpoints,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				mountpoints = ptr;

				if ((ptr = zfs_realloc(hdl, datasets,
				    alloc * sizeof (void *),
				    alloc * 2 * sizeof (void *))) == NULL)
					goto out;
				datasets = ptr;

				alloc *= 2;
			}
		}

		if ((mountpoints[used] = zfs_strdup(hdl,
		    entry.mnt_mountp)) == NULL)
			goto out;

		/*
		 * This is allowed to fail, in case there is some I/O error.  It
		 * is only used to determine if we need to remove the underlying
		 * mountpoint, so failure is not fatal.
		 */
		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);

		used++;
	}

	/*
	 * At this point, we have the entire list of filesystems, so sort it by
	 * mountpoint.
	 */
	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);

	/*
	 * Walk through and first unshare everything.
	 */
	for (i = 0; i < used; i++) {
		zfs_share_proto_t *curr_proto;
		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
		    curr_proto++) {
			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
			    unshare_one(hdl, mountpoints[i],
			    mountpoints[i], *curr_proto) != 0)
				goto out;
		}
	}

	/*
	 * Now unmount everything, removing the underlying directories as
	 * appropriate.
	 */
	for (i = 0; i < used; i++) {
		if (unmount_one(hdl, mountpoints[i], flags) != 0)
			goto out;
	}

	for (i = 0; i < used; i++) {
		if (datasets[i])
			remove_mountpoint(datasets[i]);
	}

	/* Surely there exists a better way to iterate a POOL to find its ZVOLs? */
	zfs_iter_root(hdl, zpool_disable_volumes, (void *) zpool_get_name(zhp));

	ret = 0;
out:
	for (i = 0; i < used; i++) {
		if (datasets[i])
			zfs_close(datasets[i]);
		free(mountpoints[i]);
	}
	free(datasets);
	free(mountpoints);

	return (ret);
}
 // Checks if the pointer is either in unshared space or in shared space
 inline bool is_in(const void* p) const {
   return OneContigSpaceCardGeneration::is_in(p) || is_shared(p);
 }
Example #28
static void semwait(int sem)
{
    struct sembuf sbuf = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
    assert (semop(sem, &sbuf, 1) != -1);
}

static void semsignal(int sem)
{
    struct sembuf sbuf = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
    assert (semop(sem, &sbuf, 1) != -1);
}

static pid_t fork_generator(
    int sem_labyrinth, int shm_stats, int shm_labyrinth, int top_x, int top_y,
    int left_x, int left_y
)
{
    pid_t child = fork();
    if (child != 0) { // Parent
        assert (child != -1);
        return child;
    } else // Child
        exit(generator(
            sem_labyrinth, shm_stats, shm_labyrinth, top_x, top_y, 
            left_x, left_y)
        );
}

// Returns the cell that shares the same wall.
static CELL *cell_fellow(CELL *cell, WALL orientation);

// Returns the opposite orientation of a wall.
static WALL orientation_inv(WALL orientation);

// Checks whether some private walls now join the same groups, or have
// become shared, after the two groups were linked.
static void update_private(
    LABYRINTH labyrinth, CELL walls[], int *last_private, int *first_shared
);

static int generator(
    int sem_labyrinth, int shm_stats, int shm_labyrinth, int top_x, int top_y,
    int left_x, int left_y
)
{
    int size = LABYRINTH_SIZE / 2;

    // Attach the shared memory segments.
    PARAL_STATS *stats = (PARAL_STATS *) shmat(shm_stats, NULL, 0);
    assert (stats != (PARAL_STATS *) -1);
    LABYRINTH labyrinth = (LABYRINTH) shmat(shm_labyrinth, NULL, 0);
    assert (labyrinth != (LABYRINTH) -1);

    // Keeps track of the unopened walls in the generator's area (cell
    // index plus the wall's orientation, using the bits reserved for the
    // group to encode the index).
    // Unshared walls are added from the left of the vector, shared walls
    // from the right.
    int n_walls = 2 * size * size;
    int last_private = -1
      , first_shared = n_walls;
    CELL walls[n_walls];

    semwait(sem_labyrinth); // Reads shared cells.
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            int top_index  = (top_y + i)  * LABYRINTH_SIZE + top_x + j
              , left_index = (left_y + i) * LABYRINTH_SIZE + left_x + j;

            CELL *top_cell  = labyrinth + top_index
               , *left_cell = labyrinth + left_index;

            CELL top_fellow  = *cell_fellow(top_cell, WALL_TOP)
               , left_fellow = *cell_fellow(left_cell, WALL_LEFT);

            // Walls on shared cells go to the right, the others to the
            // left.

            if (is_shared(*top_cell) && is_shared(top_fellow))
                walls[--first_shared] = WALL_TOP | (CELL) top_index;
            else
                walls[++last_private] = WALL_TOP | (CELL) top_index;

            if (is_shared(*left_cell) && is_shared(left_fellow))
                walls[--first_shared] = WALL_LEFT | (CELL) left_index;
            else
                walls[++last_private] = WALL_LEFT | (CELL) left_index;
        }
    }
    semsignal(sem_labyrinth);

    // Loop while there are still walls to remove.
    for (;;) {
        semwait(sem_labyrinth);

        // Check that the shared walls have not been removed by another
        // process and that they still separate two different groups.
        for (int i = n_walls - 1; i >= first_shared; i--) {
            CELL wall = walls[i];
            CELL index = wall & GROUP_MASK;
            WALL orientation = wall & WALLS_MASK;
            CELL *cell1 = labyrinth + index;
            CELL *cell2 = cell_fellow(cell1, orientation);

            bool can_be_removed =
                   is_wall(*cell1, orientation)
                && cell_root(labyrinth, cell1) != cell_root(labyrinth, cell2);

            if (!can_be_removed) // This wall can no longer be removed.
                walls[i++] = walls[first_shared++];
        }

        // Picks a wall at random among the remaining walls.
        int n_remaining = (last_private + 1) + (n_walls - first_shared);
        if (n_remaining == 0) {
            semsignal(sem_labyrinth);
            break;
        }
        int random_index = rand() % n_remaining;

        if (random_index <= last_private) {
            stats->hits++;
            semsignal(sem_labyrinth);

            // This part of the algorithm can run concurrently in several
            // processes:

            CELL wall = walls[random_index];
            CELL index = wall & GROUP_MASK;
            WALL orientation = wall & WALLS_MASK;
            CELL *cell1 = labyrinth + index;
            CELL *cell2 = cell_fellow(cell1, orientation);
            CELL *root1 = cell_root(labyrinth, cell1)
               , *root2 = cell_root(labyrinth, cell2);

            // Always attach a cell that does not belong to a shared group
            // to the other cell. This way, the root of a group containing
            // at least one shared cell is itself a shared cell. It also
            // avoids modifying a forest that another process might be
            // modifying at the same time.
            if (is_shared(*root1))
                cell_attach_group(root2, *root1);
            else
                cell_attach_group(root1, *root2);
            // Remove the wall from the pending list and from both cells.
            walls[random_index] = walls[last_private--];

            // These two calls are equivalent to:
            // *cell1 &= ~orientation;
            // *cell2 &= ~orientation_inv(orientation);
            // except that the compiler guarantees they execute atomically,
            // which prevents two processes from removing two different
            // walls of the same cell at the same instant, which would lose
            // one of the two updates.
            __sync_fetch_and_and(cell1, ~orientation);
            __sync_fetch_and_and(cell2, ~orientation_inv(orientation));
            // An alternative would have been to acquire the sem_labyrinth
            // semaphore when changing the orientation of a cell on an edge
            // of the generator's area, but this approach is considerably
            // faster.

            update_private(labyrinth, walls, &last_private, &first_shared);
        } else {
            stats->misses++;

            int wall_index = random_index - (last_private + 1) + first_shared;
            CELL wall = walls[wall_index];
            CELL index = wall & GROUP_MASK;
            WALL orientation = wall & WALLS_MASK;
            CELL *cell1 = labyrinth + index;
            CELL *cell2 = cell_fellow(cell1, orientation);
            CELL *root1 = cell_root(labyrinth, cell1);

            cell_attach_group(root1, *cell2);

            // Remove the wall from the pending list and from both cells.
            walls[wall_index] = walls[first_shared++];
            *cell1 &= ~orientation;
            *cell2 &= ~orientation_inv(orientation);

            semsignal(sem_labyrinth);
        }
    }
    return EXIT_SUCCESS;
}
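
Note: the generator above relies on a union-find forest over cells:
cell_root follows parent links to a representative, and cell_attach_group
links one root under the other, keeping the invariant that any group
containing a shared cell has a shared root. A minimal sketch of that
structure (hypothetical encoding; the real code packs the links into the
cell's GROUP_MASK bits):

#include <vector>

// Hypothetical union-find over cells: parent[i] == i marks a root.
struct cell_forest {
    std::vector<int>  parent;
    std::vector<bool> shared;   // cells on a zone boundary

    explicit cell_forest(int n) : parent(n), shared(n, false) {
        for (int i = 0; i < n; i++)
            parent[i] = i;
    }

    int root(int i) const {
        while (parent[i] != i)
            i = parent[i];
        return i;
    }

    // Mirrors the attach policy in the generator: if either root is
    // shared, it stays the root, so shared groups keep shared roots and
    // a forest scanned by another process is never re-rooted under an
    // unshared cell.
    void attach(int a, int b) {
        int ra = root(a), rb = root(b);
        if (ra == rb)
            return;
        if (shared[ra])
            parent[rb] = ra;
        else
            parent[ra] = rb;
    }
};
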