Code example #1
/*
 * Notify host that there are data pending on our TX bufring.
 */
static __inline void
vmbus_chan_signal_tx(const struct vmbus_channel *chan)
{
	atomic_set_long(chan->ch_evtflag, chan->ch_evtflag_mask);
	if (chan->ch_txflags & VMBUS_CHAN_TXF_HASMNF)
		atomic_set_int(chan->ch_montrig, chan->ch_montrig_mask);
	else
		hypercall_signal_event(chan->ch_monprm_dma.hv_paddr);
}
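
The routine above follows a common "flag, then signal" pattern: the channel's bit is OR-ed into a shared event-flag word with an atomic set, and the host is then notified either through the monitor trigger page (when the channel has MNF) or through a direct hypercall. Below is a minimal, self-contained sketch of that pattern using C11 atomics; the names (evtflags, montrig, signal_host_hypercall, chan_signal_tx) are placeholders for illustration, not the FreeBSD vmbus API.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the per-channel state used above. */
static _Atomic unsigned long evtflags;	/* shared event-flag word */
static _Atomic unsigned int montrig;	/* monitor trigger word (MNF) */

static void
signal_host_hypercall(void)
{
	/* placeholder: would issue the signal-event hypercall here */
}

/* Set our bit, then poke the host by whichever mechanism applies. */
static inline void
chan_signal_tx(unsigned long evtflag_mask, bool has_mnf, unsigned int montrig_mask)
{
	/* atomic OR, analogous to atomic_set_long() above */
	atomic_fetch_or(&evtflags, evtflag_mask);
	if (has_mnf)
		atomic_fetch_or(&montrig, montrig_mask);
	else
		signal_host_hypercall();
}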
Code example #2
File: t_cancel.c  Project: AndreyRybkin/kamailio
/* should be called directly only if one of the conditions below is true:
 *  - prepare_cancel_branch or prepare_to_cancel returned true for this branch
 *  - buffer value was 0 and then set to BUSY in an atomic op.:
 *     if (atomic_cmpxchg_long(&buffer, 0, BUSY_BUFFER)==0).
 *
 * params:  t - transaction
 *          branch - branch number to be canceled
 *          reason - cancel reason structure
 *          flags - how to cancel:
 *                   F_CANCEL_B_KILL - will completely stop the 
 *                     branch (stops the timers), use with care
 *                   F_CANCEL_B_FAKE_REPLY - will send a fake 487
 *                      to all branches that haven't received any response
 *                      (>=100). It assumes the REPLY_LOCK is not held
 *                      (if it is => deadlock)
 *                  F_CANCEL_B_FORCE_C - will send a cancel (and create the 
 *                       corresp. local cancel rb) even if no reply was 
 *                       received; F_CANCEL_B_FAKE_REPLY will be ignored.
 *                  F_CANCEL_B_FORCE_RETR - don't stop retransmission if no 
 *                       reply was received on the branch; incompatible
 *                       with F_CANCEL_B_FAKE_REPLY, F_CANCEL_B_FORCE_C and
 *                       F_CANCEL_B_KILL (all of them take precedence)
 *                  default: stop only the retransmissions for the branch
 *                      and leave it to timeout if it doesn't receive any
 *                      response to the CANCEL
 * returns: 0 - branch inactive after running cancel_branch() 
 *          1 - branch still active  (fr_timer)
 *         -1 - error
 * WARNING:
 *          - F_CANCEL_B_KILL should be used only if the transaction is killed
 *            explicitly afterwards (since it might kill all the timers
 *            the transaction won't be able to "kill" itself => if not
 *            explicitly "put_on_wait" it might live forever)
 *          - F_CANCEL_B_FAKE_REPLY must be used only if the REPLY_LOCK is not
 *            held
 *          - checking for buffer==0 under REPLY_LOCK is not enough, an
 *            atomic_cmpxchg or atomic_get_and_set _must_ be used.
 */
int cancel_branch( struct cell *t, int branch,
	#ifdef CANCEL_REASON_SUPPORT
					struct cancel_reason* reason,
	#endif /* CANCEL_REASON_SUPPORT */
					int flags )
{
	char *cancel;
	unsigned int len;
	struct retr_buf *crb, *irb;
	int ret;
	struct cancel_info tmp_cd;
	void* pcbuf;

	crb=&t->uac[branch].local_cancel;
	irb=&t->uac[branch].request;
	irb->flags|=F_RB_CANCELED;
	ret=1;
	init_cancel_info(&tmp_cd);

#	ifdef EXTRA_DEBUG
	if (crb->buffer!=BUSY_BUFFER) {
		LOG(L_CRIT, "ERROR: attempt to rewrite cancel buffer: %p\n",
				crb->buffer);
		abort();
	}
#	endif

	if (flags & F_CANCEL_B_KILL){
		stop_rb_timers( irb );
		ret=0;
		if ((t->uac[branch].last_received < 100) &&
				!(flags & F_CANCEL_B_FORCE_C)) {
			DBG("DEBUG: cancel_branch: no response ever received: "
			    "giving up on cancel\n");
			/* remove BUSY_BUFFER -- mark cancel buffer as not used */
			pcbuf=&crb->buffer; /* workaround for type punning warnings */
			atomic_set_long(pcbuf, 0);
			/* try to relay auto-generated 487 canceling response only when
			 * another one is not under relaying on the branch and there is
			 * no forced response per transaction from script */
			if((flags & F_CANCEL_B_FAKE_REPLY)
					&& !(irb->flags&F_RB_RELAYREPLY)
					&& !(t->flags&T_ADMIN_REPLY)) {
				LOCK_REPLIES(t);
				if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1) == 
										RPS_ERROR){
					return -1;
				}
			}
			/* do nothing, hope that the caller will clean up */
			return ret;
		}
	}else{
		if (t->uac[branch].last_received < 100){
			if (!(flags & F_CANCEL_B_FORCE_C)) {
				/* no response received => don't send a cancel on this branch,
				 *  just drop it */
				if (!(flags & F_CANCEL_B_FORCE_RETR))
					stop_rb_retr(irb); /* stop retransmissions */
				/* remove BUSY_BUFFER -- mark cancel buffer as not used */
				pcbuf=&crb->buffer; /* workaround for type punning warnings */
				atomic_set_long(pcbuf, 0);
				if (flags & F_CANCEL_B_FAKE_REPLY){
					stop_rb_timers( irb ); /* stop even the fr timer */
					LOCK_REPLIES(t);
					if (relay_reply(t, FAKED_REPLY, branch, 487, &tmp_cd, 1)== 
											RPS_ERROR){
						return -1;
					}
					return 0; /* should be inactive after the 487 */
				}
				/* do nothing, just wait for the final timeout */
				return 1;
			}
		}
		stop_rb_retr(irb); /* stop retransmissions */
	}

	if (cfg_get(tm, tm_cfg, reparse_invite) ||
			(t->uas.request && t->uas.request->msg_flags&(FL_USE_UAC_FROM|FL_USE_UAC_TO))) {
		/* build the CANCEL from the INVITE which was sent out */
		cancel = build_local_reparse(t, branch, &len, CANCEL, CANCEL_LEN,
									 (t->uas.request && t->uas.request->msg_flags&FL_USE_UAC_TO)?0:&t->to
	#ifdef CANCEL_REASON_SUPPORT
									 , reason
	#endif /* CANCEL_REASON_SUPPORT */
									 );
	} else {
		/* build the CANCEL from the received INVITE */
		cancel = build_local(t, branch, &len, CANCEL, CANCEL_LEN, &t->to
	#ifdef CANCEL_REASON_SUPPORT
								, reason
	#endif /* CANCEL_REASON_SUPPORT */
								);
	}
	if (!cancel) {
		LOG(L_ERR, "ERROR: attempt to build a CANCEL failed\n");
		/* remove BUSY_BUFFER -- mark cancel buffer as not used */
		pcbuf=&crb->buffer; /* workaround for type punning warnings */
		atomic_set_long(pcbuf, 0);
		return -1;
	}
	/* install cancel now */
	crb->dst = irb->dst;
	crb->branch = branch;
	/* label it as cancel so that FR timer can better know how to
	   deal with it */
	crb->activ_type = TYPE_LOCAL_CANCEL;
	/* be extra careful and check for bugs (the below if could be replaced
	 *  by an atomic_set((void*)&crb->buffer, cancel)) */
	if (unlikely(atomic_cmpxchg_long((void*)&crb->buffer, (long)BUSY_BUFFER,
							(long)cancel)!= (long)BUSY_BUFFER)){
		BUG("tm: cancel_branch: local_cancel buffer=%p != BUSY_BUFFER"
				" (trying to continue)\n", crb->buffer);
		shm_free(cancel);
		return -1;
	}
	membar_write_atomic_op(); /* cancel retr. can be called from 
								 reply_received w/o the reply lock held => 
								 they check for buffer_len to 
								 see if a valid reply exists */
	crb->buffer_len = len;

	DBG("DEBUG: cancel_branch: sending cancel...\n");
	if (SEND_BUFFER( crb )>=0){
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_OUT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_OUT, crb, t->uas.request, 0, TMCB_LOCAL_F);
		if (unlikely (has_tran_tmcbs(t, TMCB_REQUEST_SENT)))
			run_trans_callbacks_with_buf(TMCB_REQUEST_SENT, crb, t->uas.request, 0, TMCB_LOCAL_F);
	}
	/*sets and starts the FINAL RESPONSE timer */
	if (start_retr( crb )!=0)
		LOG(L_CRIT, "BUG: cancel_branch: failed to start retransmission"
					" for %p\n", crb);
	return ret;
}
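
Per the header comment, cancel_branch() must only be entered after the caller has atomically claimed the branch's local-cancel buffer (0 -> BUSY_BUFFER); that claim is what prevents two threads from building a CANCEL for the same branch. A minimal sketch of that claim step, using C11 atomics in place of Kamailio's atomic_cmpxchg_long() and a hypothetical prepare_cancel() helper:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define BUSY_BUFFER ((uintptr_t)-1)	/* sentinel; the real value is project-defined */

/* Hypothetical per-branch cancel-buffer slot. */
static _Atomic uintptr_t cancel_buffer;

/* Returns true when the caller won the race and may now build and
 * install the CANCEL for this branch (i.e. call cancel_branch()). */
static bool
prepare_cancel(void)
{
	uintptr_t expected = 0;

	/* equivalent of: atomic_cmpxchg_long(&buffer, 0, BUSY_BUFFER) == 0 */
	return atomic_compare_exchange_strong(&cancel_buffer, &expected, BUSY_BUFFER);
}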
Code example #3
/*
 * Attempt to acquire a shared or exclusive token.  Returns TRUE on success,
 * FALSE on failure.
 *
 * If TOK_EXCLUSIVE is set in mode we are attempting to get an exclusive
 * token, otherwise we are attempting to get a shared token.
 *
 * If TOK_EXCLREQ is set in mode this is a blocking operation, otherwise
 * it is a non-blocking operation (for both exclusive and shared acquisitions).
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td, long mode)
{
	lwkt_token_t tok;
	lwkt_tokref_t oref;
	long count;

	tok = ref->tr_tok;
	KASSERT(((mode & TOK_EXCLREQ) == 0 ||	/* non blocking */
		td->td_gd->gd_intr_nesting_level == 0 ||
		panic_cpu_gd == mycpu),
		("Attempt to acquire token %p not already "
		"held in hard code section", tok));

	if (mode & TOK_EXCLUSIVE) {
		/*
		 * Attempt to get an exclusive token
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & ~TOK_EXCLREQ) == 0) {
				/*
				 * It is possible to get the exclusive bit.
				 * We must clear TOK_EXCLREQ on successful
				 * acquisition.
				 */
				if (atomic_cmpset_long(&tok->t_count, count,
						       (count & ~TOK_EXCLREQ) |
						       TOK_EXCLUSIVE)) {
					KKASSERT(tok->t_ref == NULL);
					tok->t_ref = ref;
					return TRUE;
				}
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * Our thread already holds the exclusive
				 * bit, we treat this tokref as a shared
				 * token (sorta) to make the token release
				 * code easier.
				 *
				 * NOTE: oref cannot race above if it
				 *	 happens to be ours, so we're good.
				 *	 But we must still have a stable
				 *	 variable for both parts of the
				 *	 comparison.
				 *
				 * NOTE: Since we already have an exclusive
				 *	 lock and don't need to check EXCLREQ
				 *	 we can just use an atomic_add here
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				ref->tr_count &= ~TOK_EXCLUSIVE;
				return TRUE;
			} else if ((mode & TOK_EXCLREQ) &&
				   (count & TOK_EXCLREQ) == 0) {
				/*
				 * Unable to get the exclusive bit but being
				 * asked to set the exclusive-request bit.
				 * Since we are going to retry anyway just
				 * set the bit unconditionally.
				 */
				atomic_set_long(&tok->t_count, TOK_EXCLREQ);
				return FALSE;
			} else {
				/*
				 * Unable to get the exclusive bit and not
				 * being asked to set the exclusive-request
				 * (aka lwkt_trytoken()), or EXCLREQ was
				 * already set.
				 */
				cpu_pause();
				return FALSE;
			}
			/* retry */
		}
	} else {
		/*
		 * Attempt to get a shared token.  Note that TOK_EXCLREQ
		 * for shared tokens simply means the caller intends to
		 * block.  We never actually set the bit in tok->t_count.
		 */
		for (;;) {
			count = tok->t_count;
			oref = tok->t_ref;	/* can be NULL */
			cpu_ccfence();
			if ((count & (TOK_EXCLUSIVE/*|TOK_EXCLREQ*/)) == 0) {
				/*
				 * It may be possible to get the token shared.
				 */
				if ((atomic_fetchadd_long(&tok->t_count, TOK_INCR) & TOK_EXCLUSIVE) == 0) {
					return TRUE;
				}
				atomic_fetchadd_long(&tok->t_count, -TOK_INCR);
				/* retry */
			} else if ((count & TOK_EXCLUSIVE) &&
				   oref >= &td->td_toks_base &&
				   oref < td->td_toks_stop) {
				/*
				 * We own the exclusive bit on the token so
				 * we can in fact also get it shared.
				 */
				atomic_add_long(&tok->t_count, TOK_INCR);
				return TRUE;
			} else {
				/*
				 * We failed to get the token shared
				 */
				return FALSE;
			}
			/* retry */
		}
	}
}
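
The shared-token fast path above is optimistic: it adds TOK_INCR first and, if the value returned by the fetch-add shows the exclusive bit, backs the increment out and reports failure. Here is a minimal sketch of just that pattern with C11 atomics; the flag layout and the t_count word are placeholders mirroring the example, not the DragonFly BSD implementation.

#include <stdatomic.h>
#include <stdbool.h>

#define TOK_EXCLUSIVE	0x00000001UL	/* low bits hold flags...              */
#define TOK_EXCLREQ	0x00000002UL
#define TOK_INCR	0x00000004UL	/* ...shared holders are counted above */

static _Atomic unsigned long t_count;	/* stand-in for tok->t_count */

/* Optimistically take a shared reference; back out if an exclusive
 * holder showed up between the read and the increment. */
static bool
try_token_shared(void)
{
	unsigned long old;

	old = atomic_fetch_add(&t_count, TOK_INCR);
	if ((old & TOK_EXCLUSIVE) == 0)
		return true;			/* got the token shared */
	atomic_fetch_sub(&t_count, TOK_INCR);	/* undo; held exclusively by someone else */
	return false;
}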