Example #1
static int __del_msg_handle (struct shim_msg_handle * msgq)
{
    if (msgq->deleted)
        return -EIDRM;

    msgq->deleted = true;
    free(msgq->queue);
    msgq->queuesize = 0;
    msgq->queueused = 0;
    free(msgq->types);
    msgq->ntypes = 0;

    struct shim_handle * hdl = MSG_TO_HANDLE(msgq);

    lock(msgq_list_lock);
    list_del_init(&msgq->list);
    put_handle(hdl);
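    /* For each hash list the queue is still linked on, unlink it and drop
     * the reference that list held on its handle. */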
    if (!hlist_unhashed(&msgq->key_hlist)) {
        hlist_del_init(&msgq->key_hlist);
        put_handle(hdl);
    }
    if (!hlist_unhashed(&msgq->qid_hlist)) {
        hlist_del_init(&msgq->qid_hlist);
        put_handle(hdl);
    }
    unlock(msgq_list_lock);
    return 0;
}
Example #2
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}
Example #3
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init(&kian->link);
	mutex_unlock(&kvm->irq_lock);
}
Example #4
/*
 * caller should hold one ref on contexts in freelist.
 */
static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
	struct hlist_node	__maybe_unused *pos, *next;
	struct ptlrpc_cli_ctx	*ctx;
	struct gss_cli_ctx	*gctx;

	cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
		hlist_del_init(&ctx->cc_cache);

		/* reverse ctx: update current seq to buddy svcctx if it exists.
		 * ideally this should be done at gss_cli_ctx_finalize(), but
		 * ctx destruction could be delayed by:
		 *  1) ctx still has a reference;
		 *  2) ctx destruction is asynchronous;
		 * and the reverse import call inval_all_ctx() requires this to
		 * be done _immediately_, otherwise a newly created reverse ctx
		 * might copy the very old sequence number from svcctx. */
		gctx = ctx2gctx(ctx);
		if (!rawobj_empty(&gctx->gc_svc_handle) &&
		    sec_is_reverse(gctx->gc_base.cc_sec)) {
			gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
					(__u32) atomic_read(&gctx->gc_seq));
		}

		/* we need to wake up waiting reqs here. the context might
		 * be forcibly released before the upcall finishes, and then
		 * the late-arrived downcall can't even find the ctx. */
		sptlrpc_cli_ctx_wakeup(ctx);

		unbind_ctx_kr(ctx);
		ctx_put_kr(ctx, 0);
	}
}
Example #5
/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn) {
		ioc_release_depth_inc(q);
		et->ops.elevator_exit_icq_fn(icq);
		ioc_release_depth_dec(q);
	}

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
Example #6
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}
Example #7
/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
 * sets @e->enr to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
	PARANOIA_ENTRY();
	PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt);

	e->lc_number = LC_FREE;
	hlist_del_init(&e->colision);
	list_move(&e->list, &lc->free);
	RETURN();
}
Example #8
/**
 * lc_set - associate index with label
 * @lc: the lru cache to operate on
 * @enr: the label to set
 * @index: the element index to associate label with.
 *
 * Used to initialize the active set to some previously recorded state.
 */
void lc_set(struct lru_cache *lc, unsigned int enr, int index)
{
	struct lc_element *e;

	if (index < 0 || index >= lc->nr_elements)
		return;

	e = lc_element_by_index(lc, index);
	e->lc_number = enr;

	hlist_del_init(&e->colision);
	hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
	list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
}
Example #9
void nilfs_remove_all_gcinode(struct the_nilfs *nilfs)
{
	struct hlist_head *head = nilfs->ns_gc_inodes_h;
	struct hlist_node *node, *n;
	struct inode *inode;
	int loop;

	for (loop = 0; loop < NILFS_GCINODE_HASH_SIZE; loop++, head++) {
		hlist_for_each_entry_safe(inode, node, n, head, i_hash) {
			hlist_del_init(&inode->i_hash);
			list_del_init(&NILFS_I(inode)->i_dirty);
			nilfs_clear_gcinode(inode); /* might sleep */
		}
	}
}
Example #10
static
void ctx_list_destroy_pf(struct hlist_head *head)
{
	struct ptlrpc_cli_ctx *ctx;

	while (!hlist_empty(head)) {
		ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
				      cc_cache);

		LASSERT(atomic_read(&ctx->cc_refcount) == 0);
		LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
				 &ctx->cc_flags) == 0);

		hlist_del_init(&ctx->cc_cache);
		ctx_destroy_pf(ctx->cc_sec, ctx);
	}
}
Example #11
/*
 * caller must hold spinlock
 */
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
	assert_spin_locked(&ctx->cc_sec->ps_lock);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
	LASSERT(!hlist_unhashed(&ctx->cc_cache));

	clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

	if (atomic_dec_and_test(&ctx->cc_refcount)) {
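		/* last reference is going away: the node moves straight onto
		 * @freelist, so a plain __hlist_del (no re-init) is enough
		 * before it is re-added there */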
		__hlist_del(&ctx->cc_cache);
		hlist_add_head(&ctx->cc_cache, freelist);
	} else {
		hlist_del_init(&ctx->cc_cache);
	}
}
Example #12
/**
 * batadv_forw_packet_steal() - claim a forw_packet for free()
 * @forw_packet: the forwarding packet to steal
 * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
 *
 * This function tries to steal a specific forw_packet from global
 * visibility for the purpose of getting it for free(). That means
 * the caller is *not* allowed to requeue it afterwards.
 *
 * Return: True if stealing was successful. False if someone else stole it
 * before us.
 */
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}
Example #13
int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn)
{
	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
	struct mlx5i_priv *ipriv = epriv->ppriv;
	struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
	struct qpn_to_netdev *node;

	node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
	if (!node) {
		mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n");
		return -EINVAL;
	}

	spin_lock_bh(&ht->ht_lock);
	hlist_del_init(&node->hlist);
	spin_unlock_bh(&ht->ht_lock);
	kfree(node);

	return 0;
}
Example #14
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(ctx->cc_sec);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	cli_ctx_expire(ctx);

	spin_lock(&ctx->cc_sec->ps_lock);

	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
		LASSERT(!hlist_unhashed(&ctx->cc_cache));
		LASSERT(atomic_read(&ctx->cc_refcount) > 1);

		hlist_del_init(&ctx->cc_cache);
		if (atomic_dec_and_test(&ctx->cc_refcount))
			LBUG();
	}

	spin_unlock(&ctx->cc_sec->ps_lock);
}
Example #15
/*
 * Note that after this gets called, the caller should not access ctx again
 * because it might have been freed, unless the caller holds at least one
 * refcount on the ctx.
 *
 * Return non-zero if we indeed unlisted this ctx.
 */
static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
{
	struct ptlrpc_sec	*sec = ctx->cc_sec;
	struct gss_sec_keyring	*gsec_kr = sec2gsec_keyring(sec);

	/* if the hashed bit has already gone, leave the job to whoever is doing it */
	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
		return 0;

	/* drop ref inside spin lock to prevent race with other operations */
	spin_lock_if(&sec->ps_lock, !locked);

	if (gsec_kr->gsk_root_ctx == ctx)
		gsec_kr->gsk_root_ctx = NULL;
	hlist_del_init(&ctx->cc_cache);
	atomic_dec(&ctx->cc_refcount);

	spin_unlock_if(&sec->ps_lock, !locked);

	return 1;
}
Example #16
static bool __del_ipc_port (struct shim_ipc_port * port, int type)
{
    debug("deleting port %p (handle %p) for process %u\n",
          port, port->pal_handle, port->info.vmid);

    bool need_restart = false;
    type = type ? (type & port->info.type) : port->info.type;

    if ((type & IPC_PORT_KEEPALIVE) ^
        (port->info.type & IPC_PORT_KEEPALIVE))
        need_restart = true;

    /* if the port still has other usages, we will not remove the port */
    if (port->info.type & ~(type|IPC_PORT_IFPOLL|IPC_PORT_KEEPALIVE)) {
        debug("masking port %p (handle %p): type %x->%x\n",
              port, port->pal_handle, port->info.type, port->info.type & ~type);
        port->info.type &= ~type;
        goto out;
    }

    if (port->info.type & IPC_PORT_IFPOLL)
        need_restart = true;

    if (!list_empty(&port->list)) {
        list_del_init(&port->list);
        port->info.type &= IPC_PORT_IFPOLL;
        __put_ipc_port(port);
    }

    if (!hlist_unhashed(&port->hlist)) {
        hlist_del_init(&port->hlist);
        __put_ipc_port(port);
    }

out:
    port->update = true;
    return need_restart;
}
Example #17
/*
 * Set the superblock root dentry.
 * Note that this function frees the inode in case of error.
 */
static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *inode)
{
	/* The mntroot acts as the dummy root dentry for this superblock */
	if (sb->s_root == NULL) {
		sb->s_root = d_make_root(inode);
		if (sb->s_root == NULL)
			return -ENOMEM;
		ihold(inode);
		/*
		 * Ensure that this dentry is invisible to d_find_alias().
		 * Otherwise, it may be spliced into the tree by
		 * d_materialise_unique if a parent directory from the same
		 * filesystem gets mounted at a later time.
		 * This again causes shrink_dcache_for_umount_subtree() to
		 * Oops, since the test for IS_ROOT() will fail.
		 */
		spin_lock(&sb->s_root->d_inode->i_lock);
		spin_lock(&sb->s_root->d_lock);
		hlist_del_init(&sb->s_root->d_alias);
		spin_unlock(&sb->s_root->d_lock);
		spin_unlock(&sb->s_root->d_inode->i_lock);
	}
	return 0;
}
Example #18
/* Incoming data */
static void zd1201_usbrx(struct urb *urb)
{
	struct zd1201 *zd = urb->context;
	int free = 0;
	unsigned char *data = urb->transfer_buffer;
	struct sk_buff *skb;
	unsigned char type;

	if (!zd)
		return;

	switch(urb->status) {
		case -EILSEQ:
		case -ENODEV:
		case -ETIME:
		case -ENOENT:
		case -EPIPE:
		case -EOVERFLOW:
		case -ESHUTDOWN:
			dev_warn(&zd->usb->dev, "%s: rx urb failed: %d\n",
			    zd->dev->name, urb->status);
			free = 1;
			goto exit;
	}
	
	if (urb->status != 0 || urb->actual_length == 0)
		goto resubmit;

	type = data[0];
	if (type == ZD1201_PACKET_EVENTSTAT || type == ZD1201_PACKET_RESOURCE) {
		memcpy(zd->rxdata, data, urb->actual_length);
		zd->rxlen = urb->actual_length;
		zd->rxdatas = 1;
		wake_up(&zd->rxdataq);
	}
	/* Info frame */
	if (type == ZD1201_PACKET_INQUIRE) {
		int i = 0;
		unsigned short infotype, framelen, copylen;
		framelen = le16_to_cpu(*(__le16*)&data[4]);
		infotype = le16_to_cpu(*(__le16*)&data[6]);

		if (infotype == ZD1201_INF_LINKSTATUS) {
			short linkstatus;

			linkstatus = le16_to_cpu(*(__le16*)&data[8]);
			switch(linkstatus) {
				case 1:
					netif_carrier_on(zd->dev);
					break;
				case 2:
					netif_carrier_off(zd->dev);
					break;
				case 3:
					netif_carrier_off(zd->dev);
					break;
				case 4:
					netif_carrier_on(zd->dev);
					break;
				default:
					netif_carrier_off(zd->dev);
			}
			goto resubmit;
		}
		if (infotype == ZD1201_INF_ASSOCSTATUS) {
			short status = le16_to_cpu(*(__le16*)(data+8));
			int event;
			union iwreq_data wrqu;

			switch (status) {
				case ZD1201_ASSOCSTATUS_STAASSOC:
				case ZD1201_ASSOCSTATUS_REASSOC:
					event = IWEVREGISTERED;
					break;
				case ZD1201_ASSOCSTATUS_DISASSOC:
				case ZD1201_ASSOCSTATUS_ASSOCFAIL:
				case ZD1201_ASSOCSTATUS_AUTHFAIL:
				default:
					event = IWEVEXPIRED;
			}
			memcpy(wrqu.addr.sa_data, data+10, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;

			/* Send event to user space */
			wireless_send_event(zd->dev, event, &wrqu, NULL);

			goto resubmit;
		}
		if (infotype == ZD1201_INF_AUTHREQ) {
			union iwreq_data wrqu;

			memcpy(wrqu.addr.sa_data, data+8, ETH_ALEN);
			wrqu.addr.sa_family = ARPHRD_ETHER;
			/* There isn't an event that truly fits this request.
			   We assume that userspace will be smart enough to
			   see a new station being expired and send back an
			   authstation ioctl to authorize it. */
			wireless_send_event(zd->dev, IWEVEXPIRED, &wrqu, NULL);
			goto resubmit;
		}
		/* Other infotypes are handled outside this handler */
		zd->rxlen = 0;
		while (i < urb->actual_length) {
			copylen = le16_to_cpu(*(__le16*)&data[i+2]);
			/* Sanity check, sometimes we get junk */
			if (copylen+zd->rxlen > sizeof(zd->rxdata))
				break;
			memcpy(zd->rxdata+zd->rxlen, data+i+4, copylen);
			zd->rxlen += copylen;
			i += 64;
		}
		if (i >= urb->actual_length) {
			zd->rxdatas = 1;
			wake_up(&zd->rxdataq);
		}
		goto resubmit;
	}
	/* Actual data */
	if (data[urb->actual_length-1] == ZD1201_PACKET_RXDATA) {
		int datalen = urb->actual_length-1;
		unsigned short len, fc, seq;

		len = ntohs(*(__be16 *)&data[datalen-2]);
		if (len>datalen)
			len=datalen;
		fc = le16_to_cpu(*(__le16 *)&data[datalen-16]);
		seq = le16_to_cpu(*(__le16 *)&data[datalen-24]);

		if (zd->monitor) {
			if (datalen < 24)
				goto resubmit;
			if (!(skb = dev_alloc_skb(datalen+24)))
				goto resubmit;
			
			memcpy(skb_put(skb, 2), &data[datalen-16], 2);
			memcpy(skb_put(skb, 2), &data[datalen-2], 2);
			memcpy(skb_put(skb, 6), &data[datalen-14], 6);
			memcpy(skb_put(skb, 6), &data[datalen-22], 6);
			memcpy(skb_put(skb, 6), &data[datalen-8], 6);
			memcpy(skb_put(skb, 2), &data[datalen-24], 2);
			memcpy(skb_put(skb, len), data, len);
			skb->protocol = eth_type_trans(skb, zd->dev);
			zd->dev->stats.rx_packets++;
			zd->dev->stats.rx_bytes += skb->len;
			netif_rx(skb);
			goto resubmit;
		}
			
		if ((seq & IEEE80211_SCTL_FRAG) ||
		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
			struct zd1201_frag *frag = NULL;
			char *ptr;

			if (datalen<14)
				goto resubmit;
			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
				frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
				if (!frag)
					goto resubmit;
				skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
				if (!skb) {
					kfree(frag);
					goto resubmit;
				}
				frag->skb = skb;
				frag->seq = seq & IEEE80211_SCTL_SEQ;
				skb_reserve(skb, 2);
				memcpy(skb_put(skb, 12), &data[datalen-14], 12);
				memcpy(skb_put(skb, 2), &data[6], 2);
				memcpy(skb_put(skb, len), data+8, len);
				hlist_add_head(&frag->fnode, &zd->fraglist);
				goto resubmit;
			}
			hlist_for_each_entry(frag, &zd->fraglist, fnode)
				if (frag->seq == (seq&IEEE80211_SCTL_SEQ))
					break;
			if (!frag)
				goto resubmit;
			skb = frag->skb;
			ptr = skb_put(skb, len);
			if (ptr)
				memcpy(ptr, data+8, len);
			if (fc & IEEE80211_FCTL_MOREFRAGS)
				goto resubmit;
			hlist_del_init(&frag->fnode);
			kfree(frag);
		} else {
			if (datalen<14)
Example #19
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_del_init(&lf->lf_node);
	spin_unlock_bh(&linkfail_lock);
}
Example #20
int
afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
	strcpy(rootVolName, "root.afs");
    } else {
	strcpy(rootVolName, afs_rootVolumeName);
    }

    if (usingDynroot) {
	afs_GetDynrootFid(&afs_rootFid);
	tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
	struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

	if (!lc)
	    return ENOENT;
	localcell = lc->cellNum;
	afs_PutCell(lc, READ_LOCK);
	tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
	if (!tvp) {
	    char buf[128];
	    int len = strlen(rootVolName);

	    if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
		strcpy(buf, rootVolName);
		afs_strcat(buf, ".readonly");
		tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
	    }
	}
	if (tvp) {
	    int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
	    afs_rootFid.Cell = localcell;
	    if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
		&& afs_globalVp) {
		/* If we had a root fid before and it changed location we reset
		 * the afs_globalVp so that it will be reevaluated.
		 * Just decrement the reference count. This only occurs during
		 * initial cell setup and can panic the machine if we set the
		 * count to zero and fs checkv is executed when the current
		 * directory is /afs.
		 */
#ifdef AFS_LINUX20_ENV
		{
		    struct vrequest *treq = NULL;
		    struct vattr vattr;
		    cred_t *credp;
		    struct dentry *dp;
		    struct vcache *vcp;

		    afs_rootFid.Fid.Volume = volid;
		    afs_rootFid.Fid.Vnode = 1;
		    afs_rootFid.Fid.Unique = 1;

		    credp = crref();
		    if (afs_CreateReq(&treq, credp))
			goto out;
		    vcp = afs_GetVCache(&afs_rootFid, treq, NULL, NULL);
		    if (!vcp)
			goto out;
		    afs_getattr(vcp, &vattr, credp);
		    afs_fill_inode(AFSTOV(vcp), &vattr);

		    dp = d_find_alias(AFSTOV(afs_globalVp));

#if defined(AFS_LINUX24_ENV)
#if defined(HAVE_DCACHE_LOCK)
		    spin_lock(&dcache_lock);
#else
		    spin_lock(&AFSTOV(vcp)->i_lock);
#endif
#if defined(AFS_LINUX26_ENV)
		    spin_lock(&dp->d_lock);
#endif
#endif
#if defined(D_ALIAS_IS_HLIST)
		    hlist_del_init(&dp->d_alias);
		    hlist_add_head(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
#else
		    list_del_init(&dp->d_alias);
		    list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
#endif
		    dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
		    spin_unlock(&dp->d_lock);
#endif
#if defined(HAVE_DCACHE_LOCK)
		    spin_unlock(&dcache_lock);
#else
		    spin_unlock(&AFSTOV(vcp)->i_lock);
#endif
#endif
		    dput(dp);

		    AFS_FAST_RELE(afs_globalVp);
		    afs_globalVp = vcp;
		out:
		    crfree(credp);
		    afs_DestroyReq(treq);
		}
#else
#ifdef AFS_DARWIN80_ENV
		afs_PutVCache(afs_globalVp);
#else
		AFS_FAST_RELE(afs_globalVp);
#endif
		afs_globalVp = 0;
#endif
	    }
	    afs_rootFid.Fid.Volume = volid;
	    afs_rootFid.Fid.Vnode = 1;
	    afs_rootFid.Fid.Unique = 1;
	}
    }
    if (tvp) {
	afs_initState = 300;	/* won */
	afs_osi_Wakeup(&afs_initState);
	afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
	return 0;
    else
	return ENOENT;
}
Example #21
void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
{
	hlist_del_init(&kian->link);
}
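
All of the snippets above follow the same basic idiom: take whatever lock protects the list or hash table, call hlist_del_init() so the node is both unlinked and left in the self-consistent "unhashed" state, release the lock, and only then drop the reference or free the object. Below is a minimal, self-contained kernel-style sketch of that idiom; the names (struct my_entry, my_table_lock, my_entry_remove) are hypothetical and only illustrate the pattern, they do not come from any of the examples above.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical entry kept on a hash bucket protected by my_table_lock. */
struct my_entry {
	struct hlist_node node;
	int key;
};

static DEFINE_SPINLOCK(my_table_lock);

/* Unlink @e from its bucket and free it.  hlist_del_init() (rather than
 * hlist_del()) leaves e->node "unhashed", so the hlist_unhashed() check
 * used in Examples 1 and 16 stays safe even if two paths try to remove
 * the same entry under the lock. */
static void my_entry_remove(struct my_entry *e)
{
	spin_lock(&my_table_lock);
	if (!hlist_unhashed(&e->node))
		hlist_del_init(&e->node);
	spin_unlock(&my_table_lock);

	kfree(e);
}

When the node is handed to another list instead of being freed, Example 11 shows the variant: __hlist_del() followed by hlist_add_head() onto the destination list, since re-initializing the node first would be redundant.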