Example #1
0
/* unfinished helper, left commented out; the array_set() call below was
   never completed
void free_root(struct procinfo* pi){
  KASSERT(pi != NULL);
  if (pi->parent_pid == -1){
    array_set(proc)
  }

}*/
void sys__exit(int exitcode) {

  struct addrspace *as;
  struct proc *p = curproc;
  #if OPT_A2
  
  struct procinfo *pi = array_get(procinfotable, p->pid-1);
  
  if(pi == NULL){
    goto parentexited;
  }

  lock_acquire(p->p_waitpid_lock);
  
  pi->exit_code = _MKWAIT_EXIT(exitcode);
  pi->active = 0;
  cv_broadcast(pi->waitpid_cv,p->p_waitpid_lock);

  lock_release(p->p_waitpid_lock);

  free_children(p->pid);
  
parentexited:  
  #else

  /* for now, just include this to keep the compiler from complaining about
     an unused variable */
  (void)exitcode;

  #endif
  DEBUG(DB_SYSCALL,"Syscall: _exit(%d)\n",exitcode);

  KASSERT(curproc->p_addrspace != NULL);
  as_deactivate();
  /*
   * clear p_addrspace before calling as_destroy. Otherwise if
   * as_destroy sleeps (which is quite possible) when we
   * come back we'll be calling as_activate on a
   * half-destroyed address space. This tends to be
   * messily fatal.
   */
  as = curproc_setas(NULL);
  as_destroy(as);

  /* detach this thread from its process */
  /* note: curproc cannot be used after this call */
  proc_remthread(curthread);

  /* if this is the last user process in the system, proc_destroy()
     will wake up the kernel menu thread */
  proc_destroy(p);
  
  thread_exit();
  /* thread_exit() does not return, so we should never get here */
  panic("return from thread_exit in sys_exit\n");
}
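The OPT_A2 block above records the encoded exit status in the process's
procinfo slot, wakes any waiter on waitpid_cv, and then calls
free_children() to release bookkeeping for this process's children. A
minimal sketch of what that helper might look like follows, assuming a
procinfo_destroy() helper and the parent_pid == -1 "orphan" convention
hinted at in the commented-out free_root(); it is illustrative only, not
the original implementation, and synchronization is omitted.

#if OPT_A2
static void
free_children(pid_t parent)
{
  /* walk the global procinfotable used above; slot i holds pid i+1 */
  for (unsigned i = 0; i < array_num(procinfotable); i++) {
    struct procinfo *pi = array_get(procinfotable, i);
    if (pi == NULL || pi->parent_pid != parent)
      continue;
    if (!pi->active) {
      /* child already exited and can never be waited on now */
      procinfo_destroy(pi);              /* assumed helper */
      array_set(procinfotable, i, NULL);
    } else {
      /* child still running: mark it an orphan */
      pi->parent_pid = -1;
    }
  }
}
#endif /* OPT_A2 */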
Example #2
0
void tracy_free(struct tracy* t) {
    /* Free hooks list */
    ll_free(t->hooks);

    /* Free all children */
    free_children(t->childs);
    ll_free(t->childs);

    free(t);
}
/*
 * recursively nukes a branch or an entire tree from the given node
 */
static void
free_children(struct sysctlnode *rnode) 
{
	struct sysctlnode *node;

	if (rnode == NULL ||
	    SYSCTL_TYPE(rnode->sysctl_flags) != CTLTYPE_NODE ||
	    rnode->sysctl_child == NULL)
		return;

	for (node = rnode->sysctl_child;
	     node < &rnode->sysctl_child[rnode->sysctl_clen];
	     node++) {
		free_children(node);
	}
	free(rnode->sysctl_child);
	rnode->sysctl_child = NULL;
}
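The function above frees a sysctl subtree in post-order: recurse into every
child first, then free the malloc'd child array and clear the pointer. The
same pattern in isolation, on a deliberately hypothetical struct (not
struct sysctlnode), looks like this:

#include <stdlib.h>

struct tnode {
	struct tnode *child;	/* malloc'd array of children, or NULL */
	size_t nchild;		/* number of entries in child[] */
};

static void
tnode_free_children(struct tnode *n)
{
	if (n == NULL || n->child == NULL)
		return;
	/* free grandchildren before the array that holds the children */
	for (size_t i = 0; i < n->nchild; i++)
		tnode_free_children(&n->child[i]);
	free(n->child);
	n->child = NULL;
}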
Example #4
0
Neighbors find_neighbors_covertree_impl(RandomAccessIterator begin, RandomAccessIterator end, 
                         PairwiseCallback callback, IndexType k)
{
	timed_context context("Covertree-based neighbors search");

	typedef CoverTreePoint<RandomAccessIterator> TreePoint;
	v_array<TreePoint> points;
	for (RandomAccessIterator iter=begin; iter!=end; ++iter)
		push(points, TreePoint(iter, callback(*iter,*iter)));

	node<TreePoint> ct = batch_create(callback, points);

	v_array< v_array<TreePoint> > res;
	++k; // because one of the neighbors will be the actual query point
	k_nearest_neighbor(callback,ct,ct,res,k);

	Neighbors neighbors;
	neighbors.resize(end-begin);
	assert(end-begin==res.index);
	for (int i=0; i<res.index; ++i)
	{
		LocalNeighbors local_neighbors;
		local_neighbors.reserve(k);
		
		for (IndexType j=1; j<=k; ++j) // j=0 is the query point
		{
			// The actual query point is found as a neighbor, just ignore it
			if (res[i][j].iter_-begin==res[i][0].iter_-begin)
				continue;
			local_neighbors.push_back(res[i][j].iter_-begin);
		}
		neighbors[res[i][0].iter_-begin] = local_neighbors;
		free(res[i].elements);
	}
	free(res.elements);
	free_children(ct);
	free(points.elements);
	return neighbors;
}
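One detail worth isolating: because the query point is inserted into the
tree, it comes back as one of its own nearest neighbors, so the code above
asks for k+1 results and skips the entry that matches the query index. A
self-contained sketch of that filtering step, using hypothetical types
rather than the cover tree API, is:

#include <stddef.h>

/* copy up to k neighbor indices from found[], dropping the query itself */
static size_t
drop_self_match(const size_t *found, size_t nfound,
                size_t query_index, size_t *out, size_t k)
{
	size_t kept = 0;
	for (size_t j = 0; j < nfound && kept < k; j++) {
		if (found[j] == query_index)
			continue;	/* the query point appears as its own neighbor */
		out[kept++] = found[j];
	}
	return kept;
}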
/*
 * verifies that the head of the tree in the kernel is the same as the
 * head of the tree we already got, integrating new stuff and removing
 * old stuff, if it's not.
 */
static void
relearnhead(void)
{
	struct sysctlnode *h, *i, *o, qnode;
	size_t si, so;
	int rc, name;
	size_t nlen, olen, ni, oi;
	uint32_t t;

	/*
	 * if there's nothing there, there's no need to expend any
	 * effort
	 */
	if (sysctl_mibroot.sysctl_child == NULL)
		return;

	/*
	 * attempt to pull out the head of the tree, starting with the
	 * size we have now, and looping if we need more (or less)
	 * space
	 */
	si = 0;
	so = sysctl_mibroot.sysctl_clen * sizeof(struct sysctlnode);
	name = CTL_QUERY;
	memset(&qnode, 0, sizeof(qnode));
	qnode.sysctl_flags = SYSCTL_VERSION;
	do {
		si = so;
		h = malloc(si);
		rc = sysctl(&name, 1, h, &so, &qnode, sizeof(qnode));
		if (rc == -1 && errno != ENOMEM)
			return;
		if (si < so)
			free(h);
	} while (si < so);

	/*
	 * order the new copy of the head
	 */
	nlen = so / sizeof(struct sysctlnode);
	qsort(h, nlen, sizeof(struct sysctlnode), compar);

	/*
	 * verify that everything is the same.  if it is, we don't
	 * need to do any more work here.
	 */
	olen = sysctl_mibroot.sysctl_clen;
	rc = (nlen == olen) ? 0 : 1;
	o = sysctl_mibroot.sysctl_child;
	for (ni = 0; rc == 0 && ni < nlen; ni++) {
		if (h[ni].sysctl_num != o[ni].sysctl_num ||
		    h[ni].sysctl_ver != o[ni].sysctl_ver)
			rc = 1;
	}
	if (rc == 0) {
		free(h);
		return;
	}

	/*
	 * something changed.  h will become the new head, and we need
	 * pull over any subtrees we already have if they're the same
	 * version.
	 */
	i = h;
	ni = oi = 0;
	while (ni < nlen && oi < olen) {
		/*
		 * something was inserted or deleted
		 */
		if (SYSCTL_TYPE(i[ni].sysctl_flags) == CTLTYPE_NODE)
			i[ni].sysctl_child = NULL;
		if (i[ni].sysctl_num != o[oi].sysctl_num) {
			if (i[ni].sysctl_num < o[oi].sysctl_num) {
				ni++;
			}
			else {
				free_children(&o[oi]);
				oi++;
			}
			continue;
		}

		/*
		 * same number, but different version, so throw away
		 * any accumulated children
		 */
		if (i[ni].sysctl_ver != o[oi].sysctl_ver)
			free_children(&o[oi]);

		/*
		 * this node is the same, but we only need to
		 * move subtrees.
		 */
		else if (SYSCTL_TYPE(i[ni].sysctl_flags) == CTLTYPE_NODE) {	
			/*
			 * move subtree to new parent
			 */
			i[ni].sysctl_clen = o[oi].sysctl_clen;
			i[ni].sysctl_csize = o[oi].sysctl_csize;
			i[ni].sysctl_child = o[oi].sysctl_child;
			/*
			 * reparent inherited subtree
			 */
			for (t = 0;
			     i[ni].sysctl_child != NULL &&
				     t < i[ni].sysctl_clen;
			     t++)
				i[ni].sysctl_child[t].sysctl_parent = &i[ni];
		}
		ni++;
		oi++;
	}

	/*
	 * left over new nodes need to have empty subtrees cleared
	 */
	while (ni < nlen) {
		if (SYSCTL_TYPE(i[ni].sysctl_flags) == CTLTYPE_NODE)
			i[ni].sysctl_child = NULL;
		ni++;
	}

	/*
	 * left over old nodes need to be cleaned out
	 */
	while (oi < olen) {
		free_children(&o[oi]);
		oi++;
	}

	/*
	 * pop new head in
	 */
	_DIAGASSERT(__type_fit(uint32_t, nlen));
	sysctl_mibroot.sysctl_csize =
	    sysctl_mibroot.sysctl_clen = (uint32_t)nlen;
	sysctl_mibroot.sysctl_child = h;
	free(o);
}
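The trickiest part of relearnhead() is the sizing loop near the top: a
CTL_QUERY sysctl is retried with a growing buffer until the size the kernel
reports fits. A stand-alone sketch of that pattern (NetBSD-specific, with a
simplified initial guess and error handling) is:

#include <sys/param.h>
#include <sys/sysctl.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* query the root of the sysctl tree; caller frees the returned array */
static struct sysctlnode *
query_root(size_t *nlenp)
{
	struct sysctlnode qnode, *buf;
	int name = CTL_QUERY;
	size_t have, need = 16 * sizeof(struct sysctlnode);	/* initial guess */

	memset(&qnode, 0, sizeof(qnode));
	qnode.sysctl_flags = SYSCTL_VERSION;
	do {
		have = need;
		buf = malloc(have);
		if (buf == NULL)
			return NULL;
		if (sysctl(&name, 1, buf, &need, &qnode, sizeof(qnode)) == -1 &&
		    errno != ENOMEM) {
			free(buf);
			return NULL;
		}
		if (have < need)
			free(buf);	/* too small; retry with the reported size */
	} while (have < need);

	*nlenp = need / sizeof(struct sysctlnode);
	return buf;
}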
Example #6
0
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);
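
	/*
	 * Worked example with illustrative numbers (not from the original
	 * source): with epbs = 7 (128 block pointers per indirect block),
	 * db_level = 2 and db_blkid = 3, this block covers level-1 ids
	 * [3 << 7, (4 << 7) - 1] = [384, 511].  Freeing blkid = 40000,
	 * nblks = 20000 maps to level-1 ids [40000 >> 7, 59999 >> 7] =
	 * [312, 468], which the clipping above reduces to [384, 468].
	 */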

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, TRUE, FALSE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}