Example #1
void curve25519_free(void *curvep)
{
	struct curve25519_struct *curve = curvep;

	/* Scrub the encryption/decryption buffers before freeing them so
	 * no key or packet material lingers on the heap. */
	memset(curve->enc_buf, 0, curve->enc_buf_size);
	memset(curve->dec_buf, 0, curve->dec_buf_size);

	xfree(curve->enc_buf);
	xfree(curve->dec_buf);

	spinlock_destroy(&curve->enc_lock);
	spinlock_destroy(&curve->dec_lock);
}
Example #2
void curve25519_free(void *vc)
{
	struct curve25519_struct *c = vc;

	if (!c)
		return;

	memset(c->enc_buf, 0, c->enc_buf_size);
	memset(c->dec_buf, 0, c->dec_buf_size);

	xfree(c->enc_buf);
	xfree(c->dec_buf);

	spinlock_destroy(&c->enc_lock);
	spinlock_destroy(&c->dec_lock);
}
Example #3
void tprintf_cleanup(void)
{
	spinlock_lock(&buffer_lock);
	tprintf_flush();
	spinlock_unlock(&buffer_lock);

	spinlock_destroy(&buffer_lock);
}
Example #4
void cleanup_pcap_sg(void)
{
	unsigned long i;
	spinlock_destroy(&lock);
	for (i = 0; i < IOVSIZ; ++i)
		xfree(iov[i].iov_base);
	pcap_ops_group_unregister(PCAP_OPS_SG);
}
Example #5
/// Destroys the mutex.
void
mutex_destroy(mutex_t *m)
{
    KASSERT( m->mtx_flags & MUTEX_USER || list_length(&m->mtx_locking) == 0 );
    if (m->mtx_flags & MUTEX_CONDVAR)
        KASSERT( m->mtx_flags & MUTEX_USER || list_length(&m->mtx_waiting) == 0 );
    spinlock_destroy(&m->mtx_slock);
}
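The KASSERTs above encode a destroy-time contract rather than doing any work: the mutex must have no holders and no waiters when it is torn down. A minimal user-space sketch of the same contract, with invented names and an assumed waiters counter (nothing here comes from the example's source):

#include <assert.h>
#include <pthread.h>

/* Hypothetical user-space analogue of the destroy-time contract the
 * KASSERTs enforce: nobody may hold or wait on the mutex when it dies. */
typedef struct {
	pthread_mutex_t m;
	int waiters;	/* maintained by the (omitted) lock/unlock paths */
} checked_mutex_t;

void checked_mutex_destroy(checked_mutex_t *cm)
{
	/* Destroying a mutex that is locked or waited on is undefined
	 * behavior, so fail loudly in debug builds instead. */
	assert(cm->waiters == 0);
	pthread_mutex_destroy(&cm->m);
}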
Example #6
void
xfs_refcache_destroy(void)
{
	if (xfs_refcache) {
		kmem_free(xfs_refcache,
			XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
		xfs_refcache = NULL;
	}
	spinlock_destroy(&xfs_refcache_lock);
}
Example #7
void 
allocator_destroy(void)
{
  size_t i;
  for (i = 0; i < _s_allocator.chunk_count; ++i)
    free(_s_allocator.chunk_list[i]);
  free(_s_allocator.chunk_list);

  spinlock_destroy(&_s_allocator.spinlock);
}
Example #8
/* Destruction is not thread-safe */
void List_Destroy(list_t *list)
{
	list_node *old_node = list->head;
	list_node *current_node = list->head;

	while(current_node)
	{
		old_node = current_node;
		current_node = current_node->next;

		free(old_node);
	}

	list->head = NULL;

	spinlock_destroy(&list->lock);
}
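The "not thread-safe" note above is load-bearing: List_Destroy walks and frees the nodes without ever taking list->lock, so the caller must guarantee no other thread can still touch the list. A minimal caller sketch under that rule, built on hypothetical pthread-based stand-ins for the list types (none of this comes from the example's source):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the types used above. */
typedef struct list_node { struct list_node *next; } list_node;
typedef struct { list_node *head; pthread_spinlock_t lock; } list_t;

static void *producer(void *arg)
{
	list_t *list = arg;

	/* While the list is shared, every push happens under the lock. */
	for (int i = 0; i < 100; i++) {
		list_node *n = calloc(1, sizeof(*n));
		pthread_spin_lock(&list->lock);
		n->next = list->head;
		list->head = n;
		pthread_spin_unlock(&list->lock);
	}
	return NULL;
}

int main(void)
{
	list_t list = { .head = NULL };
	pthread_t t1, t2;

	pthread_spin_init(&list.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t1, NULL, producer, &list);
	pthread_create(&t2, NULL, producer, &list);

	/* Join every user of the list first: destruction is only safe
	 * once the caller has serialized all access externally. */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* Now a List_Destroy-style teardown (free the nodes, then the
	 * lock) cannot race with anyone. */
	while (list.head) {
		list_node *old = list.head;
		list.head = list.head->next;
		free(old);
	}
	pthread_spin_destroy(&list.lock);
	return 0;
}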
Example #9
static void aio_client_release(aio_client_t* client)
{
	if (0 == atomic_decrement32(&client->ref))
	{
		assert(AIO_NONE == client->state);
		assert(invalid_aio_socket == client->socket);
		assert(RW_NONE == client->data[RECV].state);
		assert(RW_NONE == client->data[SEND].state);

		if (client->handler.ondestroy)
			client->handler.ondestroy(client->param);

		spinlock_destroy(&client->locker);
#if defined(DEBUG) || defined(_DEBUG)
		memset(client, 0xCC, sizeof(*client));
#endif
		free(client);
	}
}
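A release like this is only half the pattern; every holder must have taken a reference first. A minimal sketch of the matching retain/release pair, with C11 atomics standing in for atomic_decrement32 (all names here are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_int ref; } refcounted_t;

static void retain(refcounted_t *obj)
{
	/* Taking an extra reference needs no ordering of its own. */
	atomic_fetch_add_explicit(&obj->ref, 1, memory_order_relaxed);
}

static void release(refcounted_t *obj)
{
	/* fetch_sub returns the previous value, so 1 means we dropped the
	 * last reference. acq_rel ordering makes every other holder's
	 * writes visible before teardown. */
	if (atomic_fetch_sub_explicit(&obj->ref, 1, memory_order_acq_rel) == 1) {
		/* Last reference: destroy locks, poison in debug builds,
		 * then free, as aio_client_release does above. */
		free(obj);
	}
}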
Example #10
error_t barrier_destroy(struct barrier_s *barrier)
{
	register uint_t cntr;
	kmem_req_t req;
  
	if(barrier->signature != BARRIER_ID)
		return EINVAL;

	if((barrier->owner != NULL) && (barrier->owner != current_task))
		return EINVAL;

	req.type = KMEM_PAGE;

#if ARCH_HAS_BARRIERS
	(void) arch_barrier_destroy(barrier->cluster, barrier->hwid);
#else
	if(barrier->owner == NULL)
		cntr = barrier->index;
	else
		cntr = atomic_get(&barrier->waiting);

	if(cntr != 0) return EBUSY;
#endif	/* ARCH_HAS_BARRIERS */

	barrier->signature = 0;
	cpu_wbflush();
    
	for(cntr = 0; cntr < BARRIER_WQDB_NR; cntr++)
	{
		req.ptr = barrier->pages_tbl[cntr];
		kmem_free(&req);
	}

	if(barrier->owner == NULL)
		spinlock_destroy(&barrier->lock);

	return 0;
}
Example #11
/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int     entries_size;

	if (ktp == (ktrace_t *)NULL)
		return;

	spinlock_destroy(&ktp->kt_lock);

	/*
	 * Special treatment for the Vnode trace buffer.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}
Example #12
void waitobject_destroy(WaitObject* obj)
{
    spinlock_destroy(&obj->lock);
    list_destroy(&obj->wait_queue);
}
Example #13
void workqueue_destroy(struct workqueue *wq)
{
	heap_destroy(&wq->tasks);
	spinlock_destroy(&wq->lock);
	KOBJ_DESTROY(wq, WORKQUEUE_KMALLOC);
}
Example #14
void cleanup_pcap_mmap(void)
{
	spinlock_destroy(&lock);
	pcap_ops_group_unregister(PCAP_OPS_MMAP);
}
Example #15
void ticker_destroy(struct ticker *ticker)
{
	heap_destroy(&ticker->heap);
	spinlock_destroy(&ticker->lock);
	KOBJ_DESTROY(ticker, TICKER_KMALLOC);
}
Example #16
void stack_destroy(struct stack *stack)
{
	spinlock_destroy(&stack->lock);
	KOBJ_DESTROY(stack, STACK_KMALLOC);
}
Example #17
int
dm_add_fsys_entry(
	vfs_t		*vfsp,
	dm_tokevent_t	*tevp)
{
	dm_fsreg_t	*fsrp;
	int		msgsize;
	void		*msg;
	int		lc;			/* lock cookie */

	/* Allocate and initialize a dm_fsreg_t structure for the filesystem. */

	msgsize = tevp->te_allocsize - offsetof(dm_tokevent_t, te_event);
	msg = kmem_alloc(msgsize, KM_SLEEP);
	bcopy(&tevp->te_event, msg, msgsize);

	fsrp = kmem_zalloc(sizeof(*fsrp), KM_SLEEP);
	fsrp->fr_vfsp = vfsp;
	fsrp->fr_tevp = tevp;
	fsrp->fr_fsid = *vfsp->vfs_altfsid;
	fsrp->fr_msg = msg;
	fsrp->fr_msgsize = msgsize;
	fsrp->fr_state = DM_STATE_MOUNTING;
	sv_init(&fsrp->fr_dispq, SV_DEFAULT, "fr_dispq");
	sv_init(&fsrp->fr_queue, SV_DEFAULT, "fr_queue");
	spinlock_init(&fsrp->fr_lock, "fr_lock");

	/* If no other mounted DMAPI filesystem already has this same
	   fsid_t, then add this filesystem to the list.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	if (!dm_find_fsreg(vfsp->vfs_altfsid)) {
		fsrp->fr_next = dm_registers;
		dm_registers = fsrp;
		dm_fsys_cnt++;
#ifdef CONFIG_PROC_FS
		{
		char buf[100];
		struct proc_dir_entry *entry;

		sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
		entry = create_proc_read_entry(buf, 0, 0, fsreg_read_pfs, fsrp);
		entry->owner = THIS_MODULE;
		}
#endif
		mutex_spinunlock(&dm_reg_lock, lc);
		return(0);
	}

	/* A fsid_t collision occurred, so prevent this new filesystem from
	   mounting.
	*/

	mutex_spinunlock(&dm_reg_lock, lc);

	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
	return(EBUSY);
}
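Structurally, this function is the init-publish-rollback pattern: build the registration completely, publish it under the registry lock only if no entry with the same fsid exists, and otherwise tear down everything that was initialized. A condensed sketch of that shape, using a hypothetical integer-keyed registry and pthread locking rather than the DMAPI types:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Hypothetical registry entry keyed by id, mirroring the fsid logic. */
struct entry { int id; struct entry *next; };

static struct entry *registry;
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Initialize fully before taking the lock, publish only if the key is
 * free, and roll back on a collision, as the DMAPI code above does. */
int registry_add(int id)
{
	struct entry *e = calloc(1, sizeof(*e));
	if (!e)
		return ENOMEM;
	e->id = id;

	pthread_mutex_lock(&registry_lock);
	for (struct entry *it = registry; it; it = it->next) {
		if (it->id == id) {
			pthread_mutex_unlock(&registry_lock);
			free(e);		/* rollback on collision */
			return EBUSY;
		}
	}
	e->next = registry;
	registry = e;
	pthread_mutex_unlock(&registry_lock);
	return 0;
}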
Example #18
void atomic64_destroy(atomic64_t *v)
{
    spinlock_destroy(&v->spinlock);
}
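That there is anything to destroy is itself the point: on targets without native 64-bit atomics, an atomic64_t is commonly emulated as a plain value guarded by a spinlock, so the destructor's only job is releasing that lock. A minimal sketch of such an emulation with pthread spinlocks (an assumption about the layout, not this library's actual definition):

#include <pthread.h>
#include <stdint.h>

/* Emulated 64-bit atomic: a plain value plus the lock that guards it. */
typedef struct {
	pthread_spinlock_t spinlock;
	int64_t value;
} my_atomic64_t;

void my_atomic64_init(my_atomic64_t *v, int64_t initial)
{
	pthread_spin_init(&v->spinlock, PTHREAD_PROCESS_PRIVATE);
	v->value = initial;
}

int64_t my_atomic64_add_return(my_atomic64_t *v, int64_t delta)
{
	int64_t result;

	/* Every operation takes the lock; that is the whole trick. */
	pthread_spin_lock(&v->spinlock);
	v->value += delta;
	result = v->value;
	pthread_spin_unlock(&v->spinlock);
	return result;
}

void my_atomic64_destroy(my_atomic64_t *v)
{
	/* Mirrors the one-liner above: the only resource is the lock. */
	pthread_spin_destroy(&v->spinlock);
}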
Example #19
void
dm_remove_fsys_entry(
	vfs_t		*vfsp)
{
	dm_fsreg_t	**fsrpp;
	dm_fsreg_t	*fsrp;
	int		lc;			/* lock cookie */

	/* Find the filesystem referenced by the vfsp's fsid_t and dequeue
	   it after verifying that the fr_state shows a filesystem that is
	   either mounting or unmounted.
	*/

	lc = mutex_spinlock(&dm_reg_lock);

	fsrpp = &dm_registers;
	while ((fsrp = *fsrpp) != NULL) {
		if (!bcmp(&fsrp->fr_fsid, vfsp->vfs_altfsid, sizeof(fsrp->fr_fsid)))
			break;
		fsrpp = &fsrp->fr_next;
	}
	if (fsrp == NULL) {
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: can't find DMAPI fsrp for "
			"vfsp %p\n", vfsp);
	}

	nested_spinlock(&fsrp->fr_lock);

	/* Verify that it makes sense to remove this entry. */

	if (fsrp->fr_state != DM_STATE_MOUNTING &&
	    fsrp->fr_state != DM_STATE_UNMOUNTED) {
		nested_spinunlock(&fsrp->fr_lock);
		mutex_spinunlock(&dm_reg_lock, lc);
		panic("dm_remove_fsys_entry: DMAPI sequence error: old state "
			"%d, fsrp %p\n", fsrp->fr_state, fsrp);
	}

	*fsrpp = fsrp->fr_next;
	dm_fsys_cnt--;

	nested_spinunlock(&dm_reg_lock);

	/* Since the filesystem is about to finish unmounting, we must be sure
	   that no vnodes are being referenced within the filesystem before we
	   let this event thread continue.  If the filesystem is currently in
	   state DM_STATE_MOUNTING, then we know by definition that there can't
	   be any references.  If the filesystem is DM_STATE_UNMOUNTED, then
	   any application threads referencing handles with DM_NO_TOKEN should
	   have already been awakened by dm_change_fsys_entry and should be
	   long gone by now.  Just in case they haven't yet left, sleep here
	   until they are really gone.
	*/

	while (fsrp->fr_hdlcnt) {
		fsrp->fr_unmount++;
		sv_wait(&fsrp->fr_queue, 1, &fsrp->fr_lock, lc);
		lc = mutex_spinlock(&fsrp->fr_lock);
		fsrp->fr_unmount--;
	}
	mutex_spinunlock(&fsrp->fr_lock, lc);

	/* Release all memory. */

#ifdef CONFIG_PROC_FS
	{
	char buf[100];
	sprintf(buf, DMAPI_DBG_PROCFS "/fsreg/0x%p", fsrp);
	remove_proc_entry(buf, NULL);
	}
#endif
	sv_destroy(&fsrp->fr_dispq);
	sv_destroy(&fsrp->fr_queue);
	spinlock_destroy(&fsrp->fr_lock);
	kmem_free(fsrp->fr_msg, fsrp->fr_msgsize);
	kmem_free(fsrp, sizeof(*fsrp));
}
Example #20
void
dm_uninit(void)
{
	int lc;
	dm_session_t *s;

	/* Block-scope forward declaration of a helper defined later in
	 * this file. */
	static void unlink_session(dm_session_t *s);

	if(dm_sessions_active) {
		printk(KERN_ERR "xfs dmapi is being unloaded while there are active sessions\n");

		while( dm_sessions_active ) {
			/* this for-loop mostly from dm_find_session_and_lock() */
			for (;;) {
				s = dm_sessions;
				lc = mutex_spinlock(&dm_session_lock);

				if (nested_spintrylock(&s->sn_qlock)) {
					nested_spinunlock(&dm_session_lock);
					break; /* success */
				}
				mutex_spinunlock(&dm_session_lock, lc);
			}/* for */

			/* this cleanup stuff mostly from dm_destroy_session() */
			if (s->sn_newq.eq_head || s->sn_readercnt || s->sn_delq.eq_head) {
				/* busy session */
				printk(KERN_ERR "   sessid %d (%s) is busy\n", s->sn_sessid, s->sn_info);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				break; /* do not continue */
			}
			else {
				unlink_session(s);
				nested_spinunlock(&s->sn_qlock);
				mutex_spinunlock(&dm_session_lock, lc);
				dm_clear_fsreg(s);
				spinlock_destroy(&s->sn_qlock);
				sv_destroy(&s->sn_readerq);
				sv_destroy(&s->sn_writerq);
				kmem_free(s, sizeof *s);
				printk(KERN_ERR "   sessid %d (%s) destroyed\n", s->sn_sessid, s->sn_info);
			}
		}/*while*/
	}

	/* If any of these are still locked, then we should not allow
	 * an unload.
	 * XXX can any of these be held when no sessions exist?
	 *   - yes, dm_session_lock is acquired prior to adding a new session
	 *   - no, dm_token_lock is only held when a session is locked
	 *   - ?, dm_reg_lock (XXX lookup this one)
	 */

	if( spin_is_locked(&dm_session_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_session_lock is held\n");
	if( spin_is_locked(&dm_token_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_token_lock is held\n");
	if( spin_is_locked(&dm_reg_lock) )
		printk(KERN_ERR "xfs dmapi is being unloaded while dm_reg_lock is held\n");

	spinlock_destroy(&dm_session_lock);
	spinlock_destroy(&dm_token_lock);
	spinlock_destroy(&dm_reg_lock);
}
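The spin_is_locked probes above are a last-chance diagnostic before destroying module-global locks. Portable code has no direct equivalent, but a trylock probe approximates the same check; a small sketch with pthread mutexes standing in for the kernel spinlocks (purely illustrative):

#include <pthread.h>
#include <stdio.h>

/* Warn instead of destroying a lock that is still held; destroying a
 * held lock is undefined behavior, so leaking it is the lesser evil. */
static void destroy_or_warn(pthread_mutex_t *m, const char *name)
{
	if (pthread_mutex_trylock(m) != 0) {
		fprintf(stderr, "unloading while %s is held\n", name);
		return;
	}
	pthread_mutex_unlock(m);
	pthread_mutex_destroy(m);
}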