Example #1
0
/*
 * Tear down a condition variable: release the waiters-done event, the
 * waiters lock and the semaphore owned by 'cv'.  The cond_t storage
 * itself is not freed here.  Always reports success.
 */
int dds_cond_destroy (cond_t *cv)
{
	ev_destroy(cv->waiters_done);
	lock_destroy(cv->waiters_lock);
	sema_destroy(cv->sema);
	return DDS_RETCODE_OK;
}
Example #2
0
/*
 * Net VSC on device remove
 *
 * Tears down the NetVSC state attached to 'device': stops outbound
 * traffic, disconnects from the VSP, closes the VMBus channel and
 * frees the per-device resources.  When 'destroy_channel' is false the
 * channel is marked for a non-destructive close first.  Returns 0.
 *
 * Fix: removed a stray double semicolon after the net_dev initializer.
 */
int
hv_nv_on_device_remove(struct hv_device *device, boolean_t destroy_channel)
{
	hn_softc_t *sc = device_get_softc(device->device);
	netvsc_dev *net_dev = sc->net_dev;

	/* Stop outbound traffic ie sends and receives completions */
	net_dev->destroy = TRUE;

	hv_nv_disconnect_from_vsp(net_dev);

	/* At this point, no one should be accessing net_dev except in here */

	/* Now, we can close the channel safely */

	if (!destroy_channel) {
		device->channel->state =
		    HV_CHANNEL_CLOSING_NONDESTRUCTIVE_STATE;
	}

	free(device->channel->hv_chan_rdbuf, M_NETVSC);
	hv_vmbus_channel_close(device->channel);

	sema_destroy(&net_dev->channel_init_sema);
	free(net_dev, M_NETVSC);

	return (0);
}
Example #3
0
/*
 * hot_sema_Destroy: release the OS semaphore embedded in 'sema', then
 * free the wrapper itself.  Returns HOT_OK on success; if the
 * underlying sema_destroy() fails, an error object is returned and
 * 'sema' is NOT freed.
 */
hot_err_t hot_sema_Destroy(hot_sema_t sema) {
  int rc;

  assert(sema);

  rc = sema_destroy(&sema->sema);
  if (rc != 0)
    return hot_err_Create(0,"hot_sema_Destroy: sema_destroy");

  free(sema);
  return HOT_OK;
}
/*
 * Release an abstraction-layer semaphore.  Only FreeBSD and Windows CE
 * require an explicit teardown here; on other platforms this compiles
 * to a no-op.
 */
void _rtw_free_sema(_sema	*sema)
{
#ifdef PLATFORM_FREEBSD
	sema_destroy(sema);
#endif
#ifdef PLATFORM_OS_CE
	/* On CE a _sema is a HANDLE; close it via the Win32 API. */
	CloseHandle(*sema);
#endif

}
Example #5
0
/*
 * Tear down the synchronization primitives and cached state owned by a
 * fuse session: the AVL mutex, session semaphore and mutex, the message
 * list, the AVL cache, and the credential reference.  The caller is
 * responsible for freeing 'se' itself.
 */
void
fuse_deinit_session(fuse_session_t *se)
{
	mutex_destroy(&se->avl_mutx);
	sema_destroy(&se->session_sema);
	mutex_destroy(&se->session_mutx);
	list_destroy(&se->msg_list);
	fuse_avl_destroy(&se->avl_cache);
	fuse_session_clear_cred(se);
}
/*
 * Device detach: release the VideoCore mailbox semaphore and drop the
 * cpufreq registration for this device.
 *
 * Fix: removed the 'sc' local, which was assigned from
 * device_get_softc() (a side-effect-free accessor) but never used,
 * triggering a set-but-unused-variable warning.
 */
static int
bcm2835_cpufreq_detach(device_t dev)
{

	sema_destroy(&vc_sema);

	return (cpufreq_unregister(dev));
}
Example #7
0
/*
 * Net VSC on device add
 *
 * Callback when the device belonging to this driver is added.
 * Allocates the per-device NetVSC state, opens the VMBus channel and
 * connects to the NetVSP.  Returns the new netvsc_dev on success, or
 * NULL on failure with all partially acquired resources released.
 *
 * Fix: dropped the redundant 'if (net_dev)' guard in the cleanup path;
 * the NULL case returns early right after allocation, so net_dev is
 * provably non-NULL at the cleanup label.
 */
netvsc_dev *
hv_nv_on_device_add(struct hv_device *device, void *additional_info)
{
	struct hv_vmbus_channel *chan = device->channel;
	netvsc_dev *net_dev;
	int ret = 0;

	net_dev = hv_nv_alloc_net_device(device);
	if (net_dev == NULL)
		return NULL;

	/* Initialize the NetVSC channel extension */
	sema_init(&net_dev->channel_init_sema, 0, "netdev_sema");

	chan->hv_chan_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);

	/*
	 * Open the channel
	 */
	ret = hv_vmbus_channel_open(chan,
	    NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE,
	    NULL, 0, hv_nv_on_channel_callback, chan);
	if (ret != 0) {
		/* Channel never opened: just release the read buffer. */
		free(chan->hv_chan_rdbuf, M_NETVSC);
		goto cleanup;
	}

	/*
	 * Connect with the NetVsp
	 */
	ret = hv_nv_connect_to_vsp(device);
	if (ret != 0)
		goto close;

	return (net_dev);

close:
	/* Now, we can close the channel safely */
	free(chan->hv_chan_rdbuf, M_NETVSC);
	hv_vmbus_channel_close(chan);

cleanup:
	/*
	 * Release the channel-init semaphore and the netvsc device state.
	 */
	sema_destroy(&net_dev->channel_init_sema);
	free(net_dev, M_NETVSC);

	return (NULL);
}
/*
 * f_semDestroy: release the semaphore referenced by *phSem (if any)
 * and reset the handle to F_SEM_NULL so the slot can be safely reused.
 *
 * NOTE(review): f_free() is passed phSem (the handle's address) and
 * *phSem is written afterwards — presumably f_free() frees the object
 * the handle points at rather than the handle slot itself; confirm
 * against f_free()'s contract.
 */
void f_semDestroy(
	F_SEM  *		phSem)
{
	f_assert( phSem != NULL);

	if (*phSem != F_SEM_NULL)
	{
		sema_destroy( (sema_t *)*phSem);
		f_free( phSem);
		*phSem = F_SEM_NULL;
	}
}
Example #9
0
/*
 * iscsi_door_term
 *
 * Releases the resources allocated to handle the door upcall and
 * disconnects from the door if currently connected.  Returns B_TRUE
 * when teardown was performed, or B_FALSE if the door was never
 * initialized (a debug build asserts against that case).
 */
boolean_t
iscsi_door_term(void)
{
	ASSERT(iscsi_door_init);

	if (!iscsi_door_init)
		return (B_FALSE);

	iscsi_door_init = B_FALSE;
	iscsi_door_unbind();
	rw_destroy(&iscsi_door_lock);
	sema_destroy(&iscsi_door_sema);
	return (B_TRUE);
}
Example #10
0
/*
 * POSIX sem_destroy(): validate the semaphore, then tear down the
 * underlying sema_t.  Returns 0 on success.  On failure returns -1
 * with errno set — by sem_invalid() for a bad handle, or to the
 * sema_destroy() error code otherwise.
 */
int
sem_destroy(sem_t *sem)
{
	int	error;

	if (sem_invalid(sem))
		return (-1);

	error = sema_destroy((sema_t *)sem);
	if (error == 0)
		return (0);

	errno = error;
	return (-1);
}
Example #11
0
/*
 * Net VSC on device add
 *
 * Callback when the device belonging to this driver is added.
 * Allocates the per-softc NetVSC state, opens the primary VMBus
 * channel (delivering receives to 'rxr'), and connects to the NetVSP.
 * Returns the new netvsc_dev on success, or NULL on failure with all
 * partially acquired resources released via the goto-cleanup chain.
 */
netvsc_dev *
hv_nv_on_device_add(struct hn_softc *sc, void *additional_info,
    struct hn_rx_ring *rxr)
{
	struct vmbus_channel *chan = sc->hn_prichan;
	netvsc_dev *net_dev;
	int ret = 0;

	net_dev = hv_nv_alloc_net_device(sc);
	if (net_dev == NULL)
		return NULL;

	/* Initialize the NetVSC channel extension */

	sema_init(&net_dev->channel_init_sema, 0, "netdev_sema");

	/*
	 * Open the channel
	 */
	/* The RX ring must correspond to this channel's sub-index. */
	KASSERT(rxr->hn_rx_idx == vmbus_chan_subidx(chan),
	    ("chan%u subidx %u, rxr%d mismatch",
	     vmbus_chan_id(chan), vmbus_chan_subidx(chan), rxr->hn_rx_idx));
	ret = vmbus_chan_open(chan,
	    NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE,
	    NULL, 0, hv_nv_on_channel_callback, rxr);
	if (ret != 0)
		goto cleanup;

	/*
	 * Connect with the NetVsp
	 */
	ret = hv_nv_connect_to_vsp(sc);
	if (ret != 0)
		goto close;

	return (net_dev);

close:
	/* Now, we can close the channel safely */
	vmbus_chan_close(chan);

cleanup:
	/*
	 * Free the packet buffers on the netvsc device packet queue.
	 * Release other resources.
	 * (net_dev is non-NULL here: the NULL case returned early above.)
	 */
	sema_destroy(&net_dev->channel_init_sema);
	free(net_dev, M_NETVSC);

	return (NULL);
}
Example #12
0
/*
 * Remove 'request' from the device's outstanding-request list (under
 * the spin lock) and release it: destroy its wait semaphore, then free
 * the backing memory.
 */
static inline void
hv_put_rndis_request(rndis_device *device, rndis_request *request)
{
	mtx_lock_spin(&device->req_lock);
	/* Fixme:  Has O(n) performance */
	/*
	 * XXXKYS: Use Doubly linked lists.
	 */
	STAILQ_REMOVE(&device->myrequest_list, request, rndis_request_,
	    mylist_entry);
	mtx_unlock_spin(&device->req_lock);

	sema_destroy(&request->wait_sema);
	free(request, M_DEVBUF);
}
Example #13
0
/*
 * Device detach: release the VideoCore mailbox semaphore, unwind the
 * DMA resources set up at attach time (map unload, buffer free, tag
 * destroy — in that order), then unregister from the cpufreq
 * framework.
 */
static int
bcm2835_cpufreq_detach(device_t dev)
{
	struct bcm2835_cpufreq_softc *sc;

	sc = device_get_softc(dev);

	sema_destroy(&vc_sema);

	/* Each DMA resource may be absent if attach failed part-way. */
	if (sc->dma_phys != 0)
		bus_dmamap_unload(sc->dma_tag, sc->dma_map);
	if (sc->dma_buf != NULL)
		bus_dmamem_free(sc->dma_tag, sc->dma_buf, sc->dma_map);
	if (sc->dma_tag != NULL)
		bus_dma_tag_destroy(sc->dma_tag);

	return (cpufreq_unregister(dev));
}
Example #14
0
/*
 * Net VSC on device remove
 *
 * Stops outbound traffic, disconnects from the VSP, closes the primary
 * VMBus channel and frees the NetVSC device state.  Returns 0.
 * 'destroy_channel' is retained for interface compatibility but is not
 * consulted in this variant.
 *
 * Fix: removed a stray double semicolon after the net_dev initializer.
 */
int
hv_nv_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel)
{
	netvsc_dev *net_dev = sc->net_dev;

	/* Stop outbound traffic ie sends and receives completions */
	net_dev->destroy = TRUE;

	hv_nv_disconnect_from_vsp(net_dev);

	/* At this point, no one should be accessing net_dev except in here */

	/* Now, we can close the channel safely */

	vmbus_chan_close(sc->hn_prichan);

	sema_destroy(&net_dev->channel_init_sema);
	free(net_dev, M_NETVSC);

	return (0);
}
/*
 * Release an abstraction-layer mutex.
 *
 * Fix: the PLATFORM_FREEBSD branch was nested inside the
 * PLATFORM_LINUX conditional, so a FreeBSD build (where PLATFORM_LINUX
 * is not defined) never ran sema_destroy() and leaked the semaphore.
 * The platform branches are now independent, matching _rtw_free_sema().
 */
void	_rtw_mutex_free(_mutex *pmutex)
{
#ifdef PLATFORM_LINUX

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	mutex_destroy(pmutex);
#else
	/* Older kernels have no mutex_destroy(); nothing to do. */
#endif

#endif

#ifdef PLATFORM_FREEBSD
	sema_destroy(pmutex);
#endif

#ifdef PLATFORM_OS_XP
	/* No teardown required on XP. */
#endif

#ifdef PLATFORM_OS_CE
	/* No teardown required on CE. */
#endif
}
Example #16
0
/*
 * Free lfs node since no longer in use.
 *
 * Walks li->li_lfs looking for 'lfs', unlinks it from the singly
 * linked list, releases everything the embedded vfs still references
 * (real root vnode, mount point refstr, vfs_impl), destroys the vfs
 * reflock semaphore and frees the node.  Panics if 'lfs' is not on the
 * list.  Caller must hold li->li_lfslock.
 */
static void
freelfsnode(struct lfsnode *lfs, struct loinfo *li)
{
    struct lfsnode *prev = NULL;
    struct lfsnode *this;

    ASSERT(MUTEX_HELD(&li->li_lfslock));
    ASSERT(li->li_refct > 0);
    for (this = li->li_lfs; this != NULL; this = this->lfs_next) {
        if (this == lfs) {
            /* Must be the last reference to the embedded vfs. */
            ASSERT(lfs->lfs_vfs.vfs_count == 1);
            /* Unlink from the singly linked list. */
            if (prev == NULL)
                li->li_lfs = lfs->lfs_next;
            else
                prev->lfs_next = lfs->lfs_next;
            if (lfs->lfs_realrootvp != NULL) {
                VN_RELE(lfs->lfs_realrootvp);
            }
            if (lfs->lfs_vfs.vfs_mntpt != NULL)
                refstr_rele(lfs->lfs_vfs.vfs_mntpt);
            if (lfs->lfs_vfs.vfs_implp != NULL) {
                ASSERT(lfs->lfs_vfs.vfs_femhead == NULL);
                ASSERT(lfs->lfs_vfs.vfs_vskap == NULL);
                ASSERT(lfs->lfs_vfs.vfs_fstypevsp == NULL);
                kmem_free(lfs->lfs_vfs.vfs_implp,
                          sizeof (vfs_impl_t));
            }
            sema_destroy(&lfs->lfs_vfs.vfs_reflock);
            kmem_free(lfs, sizeof (struct lfsnode));
            return;
        }
        prev = this;
    }
    /* 'lfs' was expected to be on the list; this is a kernel bug. */
    panic("freelfsnode");
    /*NOTREACHED*/
}
/*
 * raid_resync_region
 *
 * Resync 'line_count' lines starting at 'line' of the RAID unit in
 * 'cs', writing the reconstructed data to 'dev_to_write' starting at
 * block 'write_dev_start'.
 *
 * If *single_read is set, data is copied directly from the resync
 * (hotspare) column; on a read error there, the code falls back to
 * regenerating the data by xor-ing all other columns (*single_read is
 * cleared, *hs_state set to HS_BAD).  On an unrecoverable read error
 * *err_col identifies the failing column.
 *
 * Returns RAID_RESYNC_OKAY on success, or RAID_RESYNC_RDERROR /
 * RAID_RESYNC_WRERROR on failure.
 *
 * NOTE(review): the embedded bufs' semaphores are destroyed only on
 * the success path; the early error returns skip them — confirm the
 * caller compensates.
 */
int
raid_resync_region(
	md_raidcs_t	*cs,
	diskaddr_t	line,
	uint_t		line_count,
	int		*single_read,
	hs_cmds_t	*hs_state,
	int		*err_col,
	md_dev64_t	dev_to_write,
	diskaddr_t	write_dev_start)
{
	mr_unit_t 	*un = cs->cs_un;
	buf_t		*readb1 = &cs->cs_pbuf;
	buf_t		*readb2 = &cs->cs_dbuf;
	buf_t		*writeb = &cs->cs_hbuf;
	diskaddr_t	off;
	size_t		tcopysize;
	size_t		copysize;
	int 		resync;
	int		quit = 0;
	size_t		leftinseg;
	int		i;

	resync = un->un_resync_index;
	off = line * un->un_segsize;
	copysize = un->un_resync_copysize;

	/* find first column to read, skip resync column */

	leftinseg = un->un_segsize * line_count;
	while (leftinseg) {

		/* truncate last chunk to end if needed */
		if (copysize > leftinseg)
			tcopysize = leftinseg;
		else
			tcopysize = copysize;
		leftinseg -= tcopysize;

		/*
		 * One of two scenarios:
		 * 1) resync device with hotspare ok.  This implies that
		 *    we are copying from a good hotspare to a new good original
		 *    device.  In this case readb1 is used as the buf for
		 *    the read from the hotspare device.
		 * 2) For all other cases, including when in case 1) and an
		 *    error is detected on the (formerly good) hotspare device,
		 *    readb1 is used for the initial read.  readb2 is used for
		 *    all other reads.	Each readb2 buffer is xor'd into the
		 *    readb1 buffer.
		 *
		 * In both cases, writeb is used for the write, using readb1's
		 * buffer.
		 *
		 * For case 2, we could alternatively perform the read for all
		 * devices concurrently to improve performance.	 However,
		 * this could diminish performance for concurrent reads and
		 * writes if low on memory.
		 */

		/* read first buffer */

		/* switch to read from good columns if single_read */
		if (*single_read) {
			/* Resync column device must still be present. */
			if (un->un_column[resync].un_dev == NODEV64)
				return (RAID_RESYNC_RDERROR);

			reset_buf(readb1, B_READ | B_BUSY,
			    dbtob(copysize));
			readb1->b_bcount = dbtob(tcopysize);
			readb1->b_un.b_addr = cs->cs_pbuffer;
			readb1->b_edev = md_dev64_to_dev(
						un->un_column[resync].un_dev);
			readb1->b_lblkno =
			    un->un_column[resync].un_devstart + off;
			(void) md_call_strategy(readb1, MD_STR_NOTTOP, NULL);
			if (biowait(readb1)) {
				/*
				 * at this point just start rebuilding the
				 * data and go on since the other column
				 * are ok.
				 */
				*single_read = 0;
				*hs_state = HS_BAD;
				un->un_column[resync].un_devflags &=
				    ~MD_RAID_COPY_RESYNC;
				un->un_column[resync].un_devflags |=
				    MD_RAID_REGEN_RESYNC;
			}
		}

		/* if reading from all non-resync columns */
		if (!*single_read) {
			/* for each column, read line and xor into write buf */
			bzero(cs->cs_pbuffer, dbtob(tcopysize));
			for (i = 0; i < un->un_totalcolumncnt; i++) {

				if (un->un_column[i].un_dev == NODEV64)
					return (RAID_RESYNC_RDERROR);

				/* skip column getting resync'ed */
				if (i == resync) {
					continue;
				}
				reset_buf(readb1, B_READ | B_BUSY,
				    dbtob(copysize));
				readb1->b_bcount = dbtob(tcopysize);
				readb1->b_un.b_addr = cs->cs_dbuffer;
				readb1->b_edev = md_dev64_to_dev(
						un->un_column[i].un_dev);
				readb1->b_lblkno =
				    un->un_column[i].un_devstart + off;

				(void) md_call_strategy(readb1, MD_STR_NOTTOP,
					NULL);
				if (biowait(readb1)) {
					*err_col = i;
					quit = RAID_RESYNC_RDERROR;
				}

				if (quit)
					return (quit);

				/*
				 * xor readb2 data into readb1
				 * NOTE(review): readb1 is reused for every
				 * column read here (readb2 is only torn down
				 * at the end) — presumably a deliberate
				 * buffer reuse; confirm against the block
				 * comment above.
				 */
				xor(cs->cs_pbuffer, readb1->b_un.b_addr,
				    dbtob(tcopysize));
			} /* for */
		}

		/* Write the reconstructed chunk from cs_pbuffer. */
		reset_buf(writeb, B_WRITE | B_BUSY,
		    dbtob(copysize));
		writeb->b_bcount = dbtob(tcopysize);
		writeb->b_un.b_addr = cs->cs_pbuffer;
		writeb->b_lblkno = off + write_dev_start;
		writeb->b_edev = md_dev64_to_dev(dev_to_write);

		/* set write block number and perform the write */
		(void) md_call_strategy(writeb, MD_STR_NOTTOP, NULL);
		if (biowait(writeb)) {
			if (*single_read == 0) {
				*hs_state = HS_BAD;
			}
			return (RAID_RESYNC_WRERROR);
		}
		writeb->b_blkno += tcopysize;
		off += tcopysize;
	} /* while */
	/* Success: tear down the semaphores embedded in the three bufs. */
	sema_destroy(&readb1->b_io);
	sema_destroy(&readb1->b_sem);
	sema_destroy(&readb2->b_io);
	sema_destroy(&readb2->b_sem);
	sema_destroy(&writeb->b_io);
	sema_destroy(&writeb->b_sem);
	return (RAID_RESYNC_OKAY);
}
/*
 * Release the I/O and ownership semaphores embedded in a buf.  The buf
 * memory itself is freed by the caller.
 */
void
destroy_buf(buf_t *bp)
{
	sema_destroy(&bp->b_io);
	sema_destroy(&bp->b_sem);
}
Example #19
0
/*
 * lwIP porting layer: destroy a semaphore previously created by the
 * matching sys_sem_new().  Does not free the sys_sem_t storage.
 */
void sys_sem_free(sys_sem_t *sem)
{
    sema_destroy((sema_t *)sem);
}
Example #20
0
/*
 * Tear down the synchronization primitives (semaphore and condition
 * variable) embedded in a fuse message node.  The node's memory is
 * released elsewhere.
 */
static void
fuse_deinit_msg(fuse_msg_node_t *msg_p)
{
	sema_destroy(&msg_p->fmn_sema);
	cv_destroy(&msg_p->fmn_cv);
}
Example #21
0
/*
 * Release the buffer, with no I/O implied.
 *
 * Caller must hold bp->b_sem.  B_NOCACHE buffers are destroyed
 * outright; otherwise the buffer is placed on either the delayed-write
 * list or the free list for its hash bucket, waiters on buffer memory
 * are woken, and bp->b_sem is released last so no one can grab the
 * buffer off the freelist before we are done with it.
 */
void
brelse(struct buf *bp)
{
	struct buf	**backp;
	uint_t		index;
	kmutex_t	*hmp;
	struct	buf	*dp;
	struct	hbuf	*hp;


	ASSERT(SEMA_HELD(&bp->b_sem));

	/*
	 * Clear the retry write flag if the buffer was written without
	 * error.  The presence of B_DELWRI means the buffer has not yet
	 * been written and the presence of B_ERROR means that an error
	 * is still occurring.
	 */
	if ((bp->b_flags & (B_ERROR | B_DELWRI | B_RETRYWRI)) == B_RETRYWRI) {
		bp->b_flags &= ~B_RETRYWRI;
	}

	/* Check for anomalous conditions */
	if (bp->b_flags & (B_ERROR|B_NOCACHE)) {
		if (bp->b_flags & B_NOCACHE) {
			/* Don't add to the freelist. Destroy it now */
			kmem_free(bp->b_un.b_addr, bp->b_bufsize);
			sema_destroy(&bp->b_sem);
			sema_destroy(&bp->b_io);
			kmem_free(bp, sizeof (struct buf));
			return;
		}
		/*
		 * If a write failed and we are supposed to retry write,
		 * don't toss the buffer.  Keep it around and mark it
		 * delayed write in the hopes that it will eventually
		 * get flushed (and still keep the system running.)
		 */
		if ((bp->b_flags & (B_READ | B_RETRYWRI)) == B_RETRYWRI) {
			bp->b_flags |= B_DELWRI;
			/* keep fsflush from trying continuously to flush */
			bp->b_start = ddi_get_lbolt();
		} else
			bp->b_flags |= B_AGE|B_STALE;
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
	}

	/*
	 * If delayed write is set then put in on the delayed
	 * write list instead of the free buffer list.
	 */
	index = bio_bhash(bp->b_edev, bp->b_blkno);
	hmp   = &hbuf[index].b_lock;

	mutex_enter(hmp);
	hp = &hbuf[index];
	dp = (struct buf *)hp;

	/*
	 * Make sure that the number of entries on this list are
	 * Zero <= count <= total # buffers
	 */
	ASSERT(hp->b_length >= 0);
	ASSERT(hp->b_length < nbuf);

	hp->b_length++;		/* We are adding this buffer */

	if (bp->b_flags & B_DELWRI) {
		/*
		 * This buffer goes on the delayed write buffer list
		 */
		dp = (struct buf *)&dwbuf[index];
	}
	ASSERT(bp->b_bufsize > 0);
	ASSERT(bp->b_bcount > 0);
	ASSERT(bp->b_un.b_addr != NULL);

	/* B_AGE buffers go at the head of the list (reused first). */
	if (bp->b_flags & B_AGE) {
		backp = &dp->av_forw;
		(*backp)->av_back = bp;
		bp->av_forw = *backp;
		*backp = bp;
		bp->av_back = dp;
	} else {
		backp = &dp->av_back;
		(*backp)->av_forw = bp;
		bp->av_back = *backp;
		*backp = bp;
		bp->av_forw = dp;
	}
	mutex_exit(hmp);

	if (bfreelist.b_flags & B_WANTED) {
		/*
		 * Should come here very very rarely.
		 */
		mutex_enter(&bfree_lock);
		/* Re-check under the lock before broadcasting. */
		if (bfreelist.b_flags & B_WANTED) {
			bfreelist.b_flags &= ~B_WANTED;
			cv_broadcast(&bio_mem_cv);
		}
		mutex_exit(&bfree_lock);
	}

	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC);
	/*
	 * Don't let anyone get the buffer off the freelist before we
	 * release our hold on it.
	 */
	sema_v(&bp->b_sem);
}
Example #22
0
/*
 * lwIP porting layer: destroy the two semaphores backing a mailbox
 * (producer/consumer signalling).  Does not free the mailbox storage.
 */
void sys_mbox_free(sys_mbox_t *mbox)
{
    sema_destroy(&mbox->not_empty);
    sema_destroy(&mbox->not_full);
}
void my_sema_free(void *semptr) {
  sema_destroy((sema_t *)semptr);
  free(semptr);
}