Example #1
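From NetBSD's rump shmif driver: when a cloned shmem interface is destroyed, the driver stops the interface, detaches its backend, joins the receiver thread, returns the unit number to the shmif_units vmem arena with vmem_xfree(), and finally detaches and frees the softc.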
static int
shmif_unclone(struct ifnet *ifp)
{
	struct shmif_sc *sc = ifp->if_softc;

	/* Stop the interface and detach it from the shmem bus backend. */
	shmif_stop(ifp, 1);
	if_down(ifp);
	finibackend(sc);

	/* Tell the receiver thread to exit and wait for it to do so. */
	mutex_enter(&sc->sc_mtx);
	sc->sc_dying = true;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_mtx);

	if (sc->sc_rcvl)
		kthread_join(sc->sc_rcvl);
	sc->sc_rcvl = NULL;

	/* Return the unit number (stored offset by one) to the arena. */
	vmem_xfree(shmif_units, sc->sc_unit+1, 1);

	ether_ifdetach(ifp);
	if_detach(ifp);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_mtx);

	kmem_free(sc, sizeof(*sc));

	return 0;
}
Example #2
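From the illumos px (PCI Express) nexus driver: releasing a fast-DVMA reservation flushes the reserved window from the IOMMU, returns the DVMA address range to the mmu_dvma_map arena with vmem_xfree(), restores the reservation count, frees the handle structures, and runs any callbacks waiting for DVMA space or kernel memory.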
int
px_fdvma_release(dev_info_t *dip, px_t *px_p, ddi_dma_impl_t *mp)
{
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	size_t npages;
	fdvma_t *fdvma_p = (fdvma_t *)mp->dmai_fdvma;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	/* validate fdvma handle */
	if (!(mp->dmai_rflags & DMP_BYPASSNEXUS)) {
		DBG(DBG_DMA_CTL, dip, "DDI_DMA_RELEASE: not fast dma\n");
		return (DDI_FAILURE);
	}

	/* flush all reserved dvma addresses from mmu */
	px_mmu_unmap_window(mmu_p, mp);

	npages = mp->dmai_ndvmapages;
	vmem_xfree(mmu_p->mmu_dvma_map, (void *)mp->dmai_mapping,
	    MMU_PTOB(npages));

	mmu_p->mmu_dvma_reserve += npages;
	mp->dmai_ndvmapages = 0;

	/* see if there is anyone waiting for dvma space */
	if (mmu_p->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run dvma callback\n");
		ddi_run_callback(&mmu_p->mmu_dvma_clid);
	}

	/* free data structures */
	kmem_free(fdvma_p->pagecnt, npages * sizeof (uint_t));
	kmem_free(fdvma_p, sizeof (fdvma_t));
	kmem_free(mp, sizeof (px_dma_hdl_t));

	/* see if there is anyone waiting for kmem */
	if (px_kmem_clid != 0) {
		DBG(DBG_DMA_CTL, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}
Example #3
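From a Solaris sun4u pci nexus driver: tearing down the streaming cache returns the sync-flag area to static_alloc_arena with vmem_xfree() before the sc_t state structure itself is freed.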
void
sc_destroy(pci_t *pci_p)
{
	sc_t *sc_p;

	if (!pci_stream_buf_exists)
		return;

	sc_p = pci_p->pci_sc_p;

	DEBUG0(DBG_DETACH, pci_p->pci_dip, "sc_destroy:\n");

	vmem_xfree(static_alloc_arena, sc_p->sc_sync_flag_base,
	    PCI_SYNC_FLAG_SIZE);

	/*
	 * Free the streaming cache state structure.
	 */
	kmem_free(sc_p, sizeof (sc_t));
	pci_p->pci_sc_p = NULL;
}
Example #4
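From FreeBSD's MemGuard allocator: freeing a guarded object scribbles over the backed pages to help catch double frees, unbacks them, steps back over the leading guard page if one was reserved, and returns the full KVA reservation (guard pages included) to memguard_arena with vmem_xfree().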
/*
 * Free specified single object.
 */
void
memguard_free(void *ptr)
{
	vm_offset_t addr;
	u_long req_size, size, sizev;
	char *temp;
	int i;

	addr = trunc_page((uintptr_t)ptr);
	req_size = *v2sizep(addr);
	sizev = *v2sizev(addr);
	size = round_page(req_size);

	/*
	 * Page should not be guarded right now, so force a write.
	 * The purpose of this is to increase the likelihood of
	 * catching a double-free, but not necessarily a
	 * tamper-after-free (the second thread freeing might not
	 * write before freeing, so this forces it to and,
	 * subsequently, trigger a fault).
	 */
	temp = ptr;
	for (i = 0; i < size; i += PAGE_SIZE)
		temp[i] = 'M';

	/*
	 * This requires carnal knowledge of the implementation of
	 * kmem_free(), but since we've already replaced kmem_malloc()
	 * above, it's not really any worse.  We want to use the
	 * vm_map lock to serialize updates to memguard_wasted, since
	 * we had the lock at increment.
	 */
	kmem_unback(kmem_object, addr, size);
	if (sizev > size)
		addr -= PAGE_SIZE;
	vmem_xfree(memguard_arena, addr, sizev);
	if (req_size < PAGE_SIZE)
		memguard_wasted -= (PAGE_SIZE - req_size);
}
Example #5
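The matching MemGuard allocation path: KVA is reserved from memguard_arena with vmem_xalloc() (two extra guard pages when MG_GUARD_AROUND is set), only the interior is backed with physical pages, and if backing fails the reservation is released again with vmem_xfree(). Note that vmem_xfree() must be given the same base address and size that vmem_xalloc() returned, which is why the original address is kept in origaddr.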
/*
 * Allocate a single object of specified size with specified flags
 * (either M_WAITOK or M_NOWAIT).
 */
void *
memguard_alloc(unsigned long req_size, int flags)
{
	vm_offset_t addr, origaddr;
	u_long size_p, size_v;
	int do_guard, rv;

	size_p = round_page(req_size);
	if (size_p == 0)
		return (NULL);
	/*
	 * To ensure there are holes on both sides of the allocation,
	 * request 2 extra pages of KVA.  We will only actually add a
	 * vm_map_entry and get pages for the original request.  Save
	 * the value of memguard_options so we have a consistent
	 * value.
	 */
	size_v = size_p;
	do_guard = (memguard_options & MG_GUARD_AROUND) != 0;
	if (do_guard)
		size_v += 2 * PAGE_SIZE;

	/*
	 * When we pass our memory limit, reject sub-page allocations.
	 * Page-size and larger allocations will use the same amount
	 * of physical memory whether we allocate or hand off to
	 * uma_large_alloc(), so keep those.
	 */
	if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
	    req_size < PAGE_SIZE) {
		addr = (vm_offset_t)NULL;
		memguard_fail_pgs++;
		goto out;
	}
	/*
	 * Keep a moving cursor so we don't recycle KVA as long as
	 * possible.  It's not perfect, since we don't know in what
	 * order previous allocations will be free'd, but it's simple
	 * and fast, and requires O(1) additional storage if guard
	 * pages are not used.
	 *
	 * XXX This scheme will lead to greater fragmentation of the
	 * map, unless vm_map_findspace() is tweaked.
	 */
	for (;;) {
		if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
		    memguard_cursor, VMEM_ADDR_MAX,
		    M_BESTFIT | M_NOWAIT, &origaddr) == 0)
			break;
		/*
		 * The map has no space.  This may be due to
		 * fragmentation, or because the cursor is near the
		 * end of the map.
		 */
		if (memguard_cursor == memguard_base) {
			memguard_fail_kva++;
			addr = (vm_offset_t)NULL;
			goto out;
		}
		memguard_wrap++;
		memguard_cursor = memguard_base;
	}
	/* Keep the reserved base in origaddr; addr skips the leading guard page. */
	addr = origaddr;
	if (do_guard)
		addr += PAGE_SIZE;
	rv = kmem_back(kmem_object, addr, size_p, flags);
	if (rv != KERN_SUCCESS) {
		vmem_xfree(memguard_arena, origaddr, size_v);
		memguard_fail_pgs++;
		addr = (vm_offset_t)NULL;
		goto out;
	}
	memguard_cursor = origaddr + size_v;
	*v2sizep(trunc_page(addr)) = req_size;
	*v2sizev(trunc_page(addr)) = size_v;
	memguard_succ++;
	if (req_size < PAGE_SIZE) {
		memguard_wasted += (PAGE_SIZE - req_size);
		if (do_guard) {
			/*
			 * Align the request to 16 bytes, and return
			 * an address near the end of the page, to
			 * better detect array overrun.
			 */
			req_size = roundup2(req_size, 16);
			addr += (PAGE_SIZE - req_size);
		}
	}
out:
	return ((void *)addr);
}