Example #1
/*! \brief Select loop function for write queue handling
 *  \param[in] fd osmocom file descriptor
 *  \param[in] what bit-mask of events that have happened
 *
 * This function is provided so that it can be registered with the
 * select loop abstraction code (\ref osmo_fd::cb).
 */
int osmo_wqueue_bfd_cb(struct osmo_fd *fd, unsigned int what)
{
	struct osmo_wqueue *queue;

	queue = container_of(fd, struct osmo_wqueue, bfd);

	if (what & BSC_FD_READ)
		queue->read_cb(fd);

	if (what & BSC_FD_EXCEPT)
		queue->except_cb(fd);

	if (what & BSC_FD_WRITE) {
		struct msgb *msg;

		fd->when &= ~BSC_FD_WRITE;

		/* the queue might have been emptied */
		if (!llist_empty(&queue->msg_queue)) {
			--queue->current_length;

			msg = msgb_dequeue(&queue->msg_queue);
			queue->write_cb(fd, msg);
			msgb_free(msg);

			if (!llist_empty(&queue->msg_queue))
				fd->when |= BSC_FD_WRITE;
		}
	}

	return 0;
}
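
For context, a minimal sketch (not from the original source) of how such a write queue is typically set up so that the select loop ends up invoking osmo_wqueue_bfd_cb(). It assumes the usual libosmocore helpers osmo_wqueue_init(), osmo_wqueue_enqueue() and osmo_fd_register(); my_read_cb, my_write_cb and the already-connected socket descriptor are placeholders.

#include <osmocom/core/write_queue.h>
#include <osmocom/core/select.h>
#include <osmocom/core/msgb.h>

static int my_read_cb(struct osmo_fd *fd)
{
	/* placeholder: read and handle incoming data on fd->fd */
	return 0;
}

static int my_write_cb(struct osmo_fd *fd, struct msgb *msg)
{
	/* placeholder: write msgb_data(msg)/msgb_length(msg) to fd->fd */
	return 0;
}

static struct osmo_wqueue wq;

static void setup_write_queue(int connected_fd)
{
	/* osmo_wqueue_init() points wq.bfd.cb at osmo_wqueue_bfd_cb */
	osmo_wqueue_init(&wq, 100);
	wq.bfd.fd = connected_fd;
	wq.bfd.when = BSC_FD_READ;
	wq.read_cb = my_read_cb;
	wq.write_cb = my_write_cb;
	osmo_fd_register(&wq.bfd);
}

Messages queued with osmo_wqueue_enqueue() then set BSC_FD_WRITE on the fd, which is what makes the BSC_FD_WRITE branch above fire on the next select iteration.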
Example #2
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
Example #3
/*! \brief Terminate all child FSM instances of an FSM instance.
 *
 *  Iterate over all children and send them a termination event, with the given
 *  cause. Pass OSMO_FSM_TERM_PARENT to avoid dispatching events from the
 *  terminated child FSMs.
 *
 *  \param[in] fi FSM instance that should be cleared of child FSMs
 *  \param[in] cause Cause / reason for termination (OSMO_FSM_TERM_PARENT)
 *  \param[in] data Opaque event data to be passed with the parent term events
 *  \param[in] file Calling source file (from osmo_fsm_inst_term_children macro)
 *  \param[in] line Calling source line (from osmo_fsm_inst_term_children macro)
 */
void _osmo_fsm_inst_term_children(struct osmo_fsm_inst *fi,
				  enum osmo_fsm_term_cause cause,
				  void *data,
				  const char *file, int line)
{
	struct osmo_fsm_inst *first_child, *last_seen_first_child;

	/* iterate over all children, starting from the beginning every time:
	 * terminating an FSM may emit events that cause other FSMs to also
	 * terminate and remove themselves from this list. */
	last_seen_first_child = NULL;
	while (!llist_empty(&fi->proc.children)) {
		first_child = llist_entry(fi->proc.children.next,
					  typeof(*first_child),
					  proc.child);

		/* paranoia: do not loop forever */
		if (first_child == last_seen_first_child) {
			LOGPFSMLSRC(fi, LOGL_ERROR, file, line,
				    "Internal error while terminating child"
				    " FSMs: a child FSM is stuck\n");
			break;
		}
		last_seen_first_child = first_child;

		/* terminate child */
		_osmo_fsm_inst_term(first_child, cause, data,
				    file, line);
	}
}
Example #4
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
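
To complement the consumer side shown above, here is a short hedged sketch of the producer side, assuming the standard <linux/irq_work.h> API (init_irq_work() and irq_work_queue()); my_irq_work_func is a placeholder:

#include <linux/irq_work.h>

static void my_irq_work_func(struct irq_work *work)
{
	/* executed later in hard-IRQ context, when the CPU runs
	 * its irq_work list (e.g. via irq_work_run() above) */
}

static struct irq_work my_work;

static void example_raise_irq_work(void)
{
	init_irq_work(&my_work, my_irq_work_func);
	/* safe to call from any context, including NMI; the callback
	 * runs once the CPU next processes its irq_work list */
	irq_work_queue(&my_work);
}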
Example #5
static int ipa_client_write_default_cb(struct ipa_client_conn *link)
{
	struct osmo_fd *ofd = link->ofd;
	struct msgb *msg;
	struct llist_head *lh;
	int ret;

	LOGIPA(link, LOGL_DEBUG, "sending data\n");

	if (llist_empty(&link->tx_queue)) {
		ofd->when &= ~BSC_FD_WRITE;
		return 0;
	}
	lh = link->tx_queue.next;
	llist_del(lh);
	msg = llist_entry(lh, struct msgb, list);

	ret = send(link->ofd->fd, msg->data, msg->len, 0);
	if (ret < 0) {
		if (errno == EPIPE || errno == ENOTCONN) {
			ipa_client_conn_close(link);
			if (link->updown_cb)
				link->updown_cb(link, 0);
		}
		LOGIPA(link, LOGL_ERROR, "error to send\n");
	}
	msgb_free(msg);
	return 0;
}
Example #6
int write_queue_bfd_cb(struct bsc_fd *fd, unsigned int what)
{
	struct write_queue *queue;

	queue = container_of(fd, struct write_queue, bfd);

	if (what & BSC_FD_READ)
		queue->read_cb(fd);

	if (what & BSC_FD_EXCEPT)
		queue->except_cb(fd);

	if (what & BSC_FD_WRITE) {
		struct msgb *msg;

		fd->when &= ~BSC_FD_WRITE;
		msg = msgb_dequeue(&queue->msg_queue);
		if (!msg)
			return -1;

		--queue->current_length;
		queue->write_cb(fd, msg);
		msgb_free(msg);

		if (!llist_empty(&queue->msg_queue))
			fd->when |= BSC_FD_WRITE;
	}

	return 0;
}
Example #7
static void wait_pending_timeout(struct gsmd_timer *tmr, void *data)
{
	struct gsmd *g = data;
	struct gsmd_atcmd *cmd = NULL;
	u_int8_t channel = 0;
	int found = 0;

	for (channel = 0; channel < GSMD_MAX_CHANNELS; channel++) {
		if (&g->chl_100ms_wait[channel] == tmr) {
			found = 1;
			break;
		}
	}
	if (!found) {
		gsmd_log(GSMD_ERROR, "Unknown 100ms timeout\n");
		return;
	}
	g->chl_100ms_wait[channel].cb = NULL;

	if (!llist_empty(&g->pending_atcmds[channel])) {
		atcmd_wake_pending_queue(g,channel);
	} else {
		DEBUGP("Nothing more to send\n");
	}
}
Example #8
/*
 * We got the channel assigned and can now hand this channel
 * over to one of our callbacks.
 */
static int subscr_paging_cb(unsigned int hooknum, unsigned int event,
			     struct msgb *msg, void *data, void *param)
{
	struct subscr_request *request;
	struct gsm_subscriber *subscr = (struct gsm_subscriber *)param;

	/* There is no request anymore... */
	if (llist_empty(&subscr->requests))
		return -1;

	/*
	 * FIXME: What to do with paging requests coming during
	 * this callback? We must be sure to not start paging when
	 * we have an active connection to a subscriber and to make
	 * the subscr_put_channel work as required...
	 */
	request = (struct subscr_request *)subscr->requests.next;
	llist_del(&request->entry);
	subscr->in_callback = 1;
	request->cbfn(hooknum, event, msg, data, request->param);
	subscr->in_callback = 0;

	subscr_put(subscr);
	talloc_free(request);
	return 0;
}
Example #9
static void test_fc(uint32_t bucket_size_max, uint32_t bucket_leak_rate,
		    uint32_t max_queue_depth, uint32_t pdu_len,
		    uint32_t pdu_count)
{
	struct bssgp_flow_control *fc = talloc_zero(NULL, struct bssgp_flow_control);
	int i;

	bssgp_fc_init(fc, bucket_size_max, bucket_leak_rate, max_queue_depth,
		      fc_out_cb);

	osmo_gettimeofday(&tv_start, NULL);

	for (i = 0; i < pdu_count; i++) {
		fc_in(fc, pdu_len);
		osmo_timers_check();
		osmo_timers_prepare();
		osmo_timers_update();
	}

	while (1) {
		usleep(100000);
		osmo_timers_check();
		osmo_timers_prepare();
		osmo_timers_update();

		if (llist_empty(&fc->queue))
			break;
	}
}
Example #10
void subscr_get_channel(struct gsm_subscriber *subscr,
			int type, gsm_cbfn *cbfn, void *param)
{
	struct subscr_request *request;

	request = talloc(tall_sub_req_ctx, struct subscr_request);
	if (!request) {
		if (cbfn)
			cbfn(GSM_HOOK_RR_PAGING, GSM_PAGING_OOM,
				NULL, NULL, param);
		return;
	}

	memset(request, 0, sizeof(*request));
	request->subscr = subscr_get(subscr);
	request->channel_type = type;
	request->cbfn = cbfn;
	request->param = param;

	/*
	 * FIXME: We might be able to assign more than one
	 * channel, e.g. voice and SMS submit at the same
	 * time.
	 */
	if (!subscr->in_callback && llist_empty(&subscr->requests)) {
		/* add to the list, send a request */
		llist_add_tail(&request->entry, &subscr->requests);
		subscr_send_paging_request(subscr);
	} else {
		/* this will be picked up later, from subscr_put_channel */
		llist_add_tail(&request->entry, &subscr->requests);
	}
}
Example #11
void subscr_put_channel(struct gsm_lchan *lchan)
{
	/*
	 * FIXME: Continue with other requests now... by checking
	 * the gsm_subscriber inside the gsm_lchan. Drop the ref count
	 * of the lchan after having asked the next requestee to handle
	 * the channel.
	 */
	/*
	 * FIXME: if the lchan is of a different type we could still
	 * issue an immediate assignment for another channel and then
	 * close this one.
	 */
	/*
	 * Currently we will drop the last ref of the lchan which
	 * will result in a channel release on RSL and we will start
	 * the paging. This should work most of the time as the MS
	 * will listen to the paging requests before we time out.
	 */

	put_lchan(lchan);

	if (lchan->subscr && !llist_empty(&lchan->subscr->requests))
		subscr_send_paging_request(lchan->subscr);
}
Example #12
static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #13
/*! \brief Select loop function for write queue handling
 *  \param[in] fd osmocom file descriptor
 *  \param[in] what bit-mask of events that have happened
 *
 * This function is provided so that it can be registered with the
 * select loop abstraction code (\ref osmo_fd::cb).
 */
int osmo_wqueue_bfd_cb(struct osmo_fd *fd, unsigned int what)
{
	struct osmo_wqueue *queue;
	int rc;

	queue = container_of(fd, struct osmo_wqueue, bfd);

	if (what & BSC_FD_READ) {
		rc = queue->read_cb(fd);
		if (rc == -EBADF)
			goto err_badfd;
	}

	if (what & BSC_FD_EXCEPT) {
		rc = queue->except_cb(fd);
		if (rc == -EBADF)
			goto err_badfd;
	}

	if (what & BSC_FD_WRITE) {
		struct msgb *msg;

		fd->when &= ~BSC_FD_WRITE;

		/* the queue might have been emptied */
		if (!llist_empty(&queue->msg_queue)) {
			--queue->current_length;

			msg = msgb_dequeue(&queue->msg_queue);
			rc = queue->write_cb(fd, msg);
			msgb_free(msg);

			if (rc == -EBADF)
				goto err_badfd;

			if (!llist_empty(&queue->msg_queue))
				fd->when |= BSC_FD_WRITE;
		}
	}

err_badfd:
	/* Return value is not checked in osmo_select_main() */
	return 0;
}
Example #14
/*! \brief delete a gsm timer from timer management
 *  \param[in] timer the to-be-deleted timer
 *
 * This function can be used to delete a previously added/scheduled
 * timer from the timer management code.
 */
void osmo_gsm_timer_del(struct osmo_gsm_timer_list *timer)
{
	if (timer->active) {
		timer->active = 0;
		rb_erase(&timer->node, &timer_root);
		/* make sure this is not already scheduled for removal. */
		if (!llist_empty(&timer->list))
			llist_del_init(&timer->list);
	}
}
Example #15
/*! \brief Clear a \ref osmo_wqueue
 *  \param[in] queue Write queue to be cleared
 *
 * This function will clear (remove/release) all messages in it.
 */
void osmo_wqueue_clear(struct osmo_wqueue *queue)
{
	while (!llist_empty(&queue->msg_queue)) {
		struct msgb *msg = msgb_dequeue(&queue->msg_queue);
		msgb_free(msg);
	}

	queue->current_length = 0;
	queue->bfd.when &= ~BSC_FD_WRITE;
}
Example #16
/* return the requested number of bits from the specified subchannel */
static int get_subch_bits(struct subch_mux *mx, int subch,
			  u_int8_t *bits, int num_requested)
{
	struct mux_subch *sch = &mx->subch[subch];
	int num_bits = 0;

	while (num_bits < num_requested) {
		struct subch_txq_entry *txe;
		int num_bits_left;
		int num_bits_thistime;

		/* make sure we have a valid entry at top of tx queue.
		 * if not, add an idle frame */
		if (llist_empty(&sch->tx_queue))
			alloc_add_idle_frame(mx, subch);
	
		if (llist_empty(&sch->tx_queue))
			return -EIO;

		txe = llist_entry(sch->tx_queue.next, struct subch_txq_entry, list);
		num_bits_left = txe->bit_len - txe->next_bit;

		if (num_bits_left < num_requested)
			num_bits_thistime = num_bits_left;
		else
			num_bits_thistime = num_requested;

		/* pull the bits from the txe */
		memcpy(bits + num_bits, txe->bits + txe->next_bit, num_bits_thistime);
		txe->next_bit += num_bits_thistime;

		/* free the tx_queue entry if it is fully consumed */
		if (txe->next_bit >= txe->bit_len) {
			llist_del(&txe->list);
			talloc_free(txe);
		}

		/* increment global number of bits dequeued */
		num_bits += num_bits_thistime;
	}

	return num_requested;
}
Example #17
/*! \brief Dequeue message buffer from head of queue
 * \param[in] queue linked list header of queue
 * \returns message buffer (if any) or NULL if queue empty
 *
 * The function will remove the first message buffer from the queue
 * implemented by \ref llist_head \a queue.
 */
struct msgb *msgb_dequeue(struct llist_head *queue)
{
	struct llist_head *lh;

	if (llist_empty(queue))
		return NULL;

	lh = queue->next;
	llist_del(lh);
	
	return llist_entry(lh, struct msgb, list);
}
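
As a usage note (not part of the original source), the enqueue counterpart is msgb_enqueue(); a minimal hedged sketch of the FIFO pairing, where my_queue and the buffer size are placeholders:

#include <osmocom/core/msgb.h>
#include <osmocom/core/linuxlist.h>

static LLIST_HEAD(my_queue);

static void example_msgb_fifo(void)
{
	struct msgb *msg = msgb_alloc(128, "example");
	if (!msg)
		return;

	/* msgb_enqueue() appends at the tail; msgb_dequeue() above
	 * removes from the head, so the list behaves as a FIFO */
	msgb_enqueue(&my_queue, msg);

	msg = msgb_dequeue(&my_queue);
	msgb_free(msg);
}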
Example #18
size_t ipa_client_conn_clear_queue(struct ipa_client_conn *link)
{
	size_t deleted = 0;

	while (!llist_empty(&link->tx_queue)) {
		struct msgb *msg = msgb_dequeue(&link->tx_queue);
		msgb_free(msg);
		deleted += 1;
	}

	link->ofd->when &= ~BSC_FD_WRITE;
	return deleted;
}
Example #19
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
Example #20
/* evict the 'num_evict' number of oldest entries in the queue */
static void tx_queue_evict(struct mux_subch *sch, int num_evict)
{
	struct subch_txq_entry *tqe;
	int i;

	for (i = 0; i < num_evict; i++) {
		if (llist_empty(&sch->tx_queue))
			return;

		tqe = llist_entry(sch->tx_queue.next, struct subch_txq_entry, list);
		llist_del(&tqe->list);
		talloc_free(tqe);
	}
}
Example #21
static int pcu_sock_write(struct osmo_fd *bfd)
{
	struct pcu_sock_state *state = (struct pcu_sock_state *)bfd->data;
	int rc;

	while (!llist_empty(&state->upqueue)) {
		struct msgb *msg, *msg2;
		struct gsm_pcu_if *pcu_prim;

		/* peek at the beginning of the queue */
		msg = llist_entry(state->upqueue.next, struct msgb, list);
		pcu_prim = (struct gsm_pcu_if *)msg->data;

		bfd->when &= ~BSC_FD_WRITE;

		/* bug hunter 8-): maybe someone forgot msgb_put(...) ? */
		if (!msgb_length(msg)) {
			LOGP(DL1IF, LOGL_ERROR, "message type (%d) with ZERO "
				"bytes!\n", pcu_prim->msg_type);
			goto dontsend;
		}

		/* try to send it over the socket */
		rc = write(bfd->fd, msgb_data(msg), msgb_length(msg));
		if (rc == 0)
			goto close;
		if (rc < 0) {
			if (errno == EAGAIN) {
				bfd->when |= BSC_FD_WRITE;
				break;
			}
			goto close;
		}

dontsend:
		/* _after_ we send it, we can dequeue */
		msg2 = msgb_dequeue(&state->upqueue);
		assert(msg == msg2);
		msgb_free(msg);
	}
	return 0;

close:
	pcu_sock_close(state, 1);

	return -1;
}
Example #22
static void subscr_send_paging_request(struct gsm_subscriber *subscr)
{
	struct subscr_request *request;
	int rc;

	assert(!llist_empty(&subscr->requests));

	request = (struct subscr_request *)subscr->requests.next;
	rc = paging_request(subscr->net, subscr, request->channel_type,
			    subscr_paging_cb, subscr);

	/* paging failed, quit now */
	if (rc <= 0) {
		subscr_paging_cb(GSM_HOOK_RR_PAGING, GSM_PAGING_EXPIRED,
				 NULL, NULL, subscr);
	}
}
Example #23
struct req_ctx *req_ctx_dequeue(struct llist_head *list)
{
	unsigned long flags;
	struct req_ctx *rctx;

	local_irq_save(flags);
	if (llist_empty(list)) {
		local_irq_restore(flags);
		return NULL;
	}

	rctx = llist_entry(list->next, struct req_ctx, list);
	llist_del(&rctx->list);
	local_irq_restore(flags);

	return rctx;
}
Example #24
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #25
/* send first ctrl message and start timer */
static void trx_ctrl_send(struct trx_l1h *l1h)
{
	struct trx_ctrl_msg *tcm;

	/* get first command */
	if (llist_empty(&l1h->trx_ctrl_list))
		return;
	tcm = llist_entry(l1h->trx_ctrl_list.next, struct trx_ctrl_msg, list);

	LOGP(DTRX, LOGL_DEBUG, "Sending control '%s' to %s\n", tcm->cmd,
		phy_instance_name(l1h->phy_inst));
	/* send command */
	send(l1h->trx_ofd_ctrl.fd, tcm->cmd, strlen(tcm->cmd)+1, 0);

	/* start timer */
	l1h->trx_ctrl_timer.cb = trx_ctrl_timer_cb;
	l1h->trx_ctrl_timer.data = l1h;
	osmo_timer_schedule(&l1h->trx_ctrl_timer, 2, 0);
}
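
For completeness, a small sketch (assumptions, not original code) of the libosmocore timer pattern used above: the callback signature, scheduling, and cancellation. my_timer_cb and the two-second timeout are placeholders.

#include <osmocom/core/timer.h>

static struct osmo_timer_list my_timer;

static void my_timer_cb(void *data)
{
	/* invoked from the select/timer loop once the timeout expires,
	 * e.g. to retransmit a still-unacknowledged control command */
}

static void example_schedule(void *priv)
{
	my_timer.cb = my_timer_cb;
	my_timer.data = priv;
	osmo_timer_schedule(&my_timer, 2, 0);	/* 2 s, 0 us */
}

static void example_cancel(void)
{
	/* stop the timer again, e.g. once the response has arrived */
	osmo_timer_del(&my_timer);
}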
Example #26
File: smp.c Project: Lyude/linux
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}
}
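
As a rough usage counterpart (an illustrative sketch, not taken from smp.c), callbacks typically land on call_single_queue via the public cross-call API such as smp_call_function_single(); remote_func and the counter are placeholders:

#include <linux/smp.h>

static void remote_func(void *info)
{
	/* runs on the target CPU, dispatched from the IPI handler
	 * by flush_smp_call_function_queue() */
	int *counter = info;
	(*counter)++;
}

static void example_cross_call(int target_cpu)
{
	int counter = 0;

	/* wait=1: block until remote_func has finished on target_cpu */
	smp_call_function_single(target_cpu, remote_func, &counter, 1);
}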
Example #27
/**
 * Dequeues a single primitive of required type
 * from a specified transmit queue.
 *
 * @param  queue      a transmit queue to take a prim from
 * @param  lchan_type required primitive type
 * @return            a primitive or NULL if not found
 */
struct trx_ts_prim *sched_prim_dequeue(struct llist_head *queue,
	enum trx_lchan_type lchan_type)
{
	struct trx_ts_prim *prim;

	/* There is nothing to dequeue */
	if (llist_empty(queue))
		return NULL;

	/* TCH requires FACCH prioritization, so handle it separately */
	if (CHAN_IS_TCH(lchan_type))
		return sched_prim_dequeue_tch(queue);

	llist_for_each_entry(prim, queue, list) {
		if (prim->chan == lchan_type) {
			llist_del(&prim->list);
			return prim;
		}
	}

	return NULL;
}
Example #28
/* add a new ctrl command */
static int trx_ctrl_cmd(struct trx_l1h *l1h, int critical, const char *cmd,
	const char *fmt, ...)
{
	struct trx_ctrl_msg *tcm;
	va_list ap;
	int l, pending = 0;

	if (!transceiver_available &&
	    !(!strcmp(cmd, "POWEROFF") || !strcmp(cmd, "POWERON"))) {
		LOGP(DTRX, LOGL_ERROR, "CTRL %s ignored: No clock from "
		     "transceiver, please fix!\n", cmd);
		return -EIO;
	}

	if (!llist_empty(&l1h->trx_ctrl_list))
		pending = 1;

	/* create message */
	tcm = talloc_zero(tall_bts_ctx, struct trx_ctrl_msg);
	if (!tcm)
		return -ENOMEM;
	if (fmt && fmt[0]) {
		l = snprintf(tcm->cmd, sizeof(tcm->cmd)-1, "CMD %s ", cmd);
		va_start(ap, fmt);
		vsnprintf(tcm->cmd + l, sizeof(tcm->cmd) - l - 1, fmt, ap);
		va_end(ap);
	} else
		snprintf(tcm->cmd, sizeof(tcm->cmd)-1, "CMD %s", cmd);
	tcm->cmd_len = strlen(cmd);
	tcm->critical = critical;
	llist_add_tail(&tcm->list, &l1h->trx_ctrl_list);
	LOGP(DTRX, LOGL_INFO, "Adding new control '%s'\n", tcm->cmd);

	/* send message, if no pending message */
	if (!pending)
		trx_ctrl_send(l1h);

	return 0;
}
Example #29
static void pcu_sock_close(struct pcu_sock_state *state, int lost)
{
	struct osmo_fd *bfd = &state->conn_bfd;
	struct gprs_rlcmac_bts *bts = bts_main_data();
	uint8_t trx, ts;

	LOGP(DL1IF, LOGL_NOTICE, "PCU socket has %s connection\n",
		(lost) ? "LOST" : "closed");

	close(bfd->fd);
	bfd->fd = -1;
	osmo_fd_unregister(bfd);

	/* flush the queue */
	while (!llist_empty(&state->upqueue)) {
		struct msgb *msg = msgb_dequeue(&state->upqueue);
		msgb_free(msg);
	}

	/* disable all slots, kick all TBFs */
	for (trx = 0; trx < 8; trx++) {
#ifdef ENABLE_DIRECT_PHY
		if (bts->trx[trx].fl1h) {
			l1if_close_pdch(bts->trx[trx].fl1h);
			bts->trx[trx].fl1h = NULL;
		}
#endif
		for (ts = 0; ts < 8; ts++)
			bts->trx[trx].pdch[ts].disable();
/* FIXME: NOT ALL RESOURCES are freed in this case... inconsistent with the other code. Share the code with pcu_l1if.c
for the reset. */
		gprs_rlcmac_tbf::free_all(&bts->trx[trx]);
	}

	gprs_bssgp_destroy();
	exit(0);
}
Example #30
int bsc_msc_connect(struct bsc_msc_connection *con)
{
	struct bsc_msc_dest *dest;
	struct osmo_fd *fd;
	struct sockaddr_in sin;
	int on = 1, ret;

	if (llist_empty(con->dests)) {
		LOGP(DMSC, LOGL_ERROR,
			"No MSC(%s) connections configured.\n",
			con->name);
		connection_loss(con);
		return -1;
	}

	/* TODO: Why are we not using the libosmocore socket
	 * abstraction, or libosmo-netif? */

	/* move to the next connection */
	dest = (struct bsc_msc_dest *) con->dests->next;
	llist_del(&dest->list);
	llist_add_tail(&dest->list, con->dests);

	LOGP(DMSC, LOGL_NOTICE,
		"Attempting to connect MSC(%s) at %s:%d\n",
		con->name, dest->ip, dest->port);

	con->is_connected = 0;

	msgb_free(con->pending_msg);
	con->pending_msg = NULL;

	fd = &con->write_queue.bfd;
	fd->fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	fd->priv_nr = 1;

	if (fd->fd < 0) {
		perror("Creating TCP socket failed");
		return fd->fd;
	}

	/* make it non blocking */
	setnonblocking(fd);

	/* set the socket priority */
	ret = setsockopt(fd->fd, IPPROTO_IP, IP_TOS,
			 &dest->dscp, sizeof(dest->dscp));
	if (ret != 0)
		LOGP(DMSC, LOGL_ERROR,
			"Failed to set DSCP to %d on MSC(%s). %s\n",
			dest->dscp, con->name, strerror(errno));

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(dest->port);
	inet_aton(dest->ip, &sin.sin_addr);

	ret = setsockopt(fd->fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	if (ret != 0)
		LOGP(DMSC, LOGL_ERROR,
		     "Failed to set SO_REUSEADDR socket option\n");
	ret = connect(fd->fd, (struct sockaddr *) &sin, sizeof(sin));

	if (ret == -1 && errno == EINPROGRESS) {
		LOGP(DMSC, LOGL_ERROR,
			"MSC(%s) Connection in progress\n", con->name);
		fd->when = BSC_FD_WRITE;
		fd->cb = msc_connection_connect;
		con->timeout_timer.cb = msc_con_timeout;
		con->timeout_timer.data = con;
		osmo_timer_schedule(&con->timeout_timer, 20, 0);
	} else if (ret < 0) {
		perror("Connection failed");
		connection_loss(con);
		return ret;
	} else {
		fd->when = BSC_FD_READ | BSC_FD_EXCEPT;
		fd->cb = osmo_wqueue_bfd_cb;
		con->is_connected = 1;
		if (con->connected)
			con->connected(con);
	}

	ret = osmo_fd_register(fd);
	if (ret < 0) {
		perror("Registering the fd failed");
		close(fd->fd);
		return ret;
	}

	return ret;
}
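
Regarding the TODO above about the libosmocore socket abstraction: a hedged sketch of what the connect step could look like with osmo_sock_init(); connect_msc_fd is a placeholder name, and the OSMO_SOCK_F_CONNECT | OSMO_SOCK_F_NONBLOCK combination is assumed to give the same non-blocking connect behaviour as the manual socket()/connect() sequence above.

#include <osmocom/core/socket.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <stdint.h>

/* illustrative only: open a non-blocking TCP connection to the MSC */
static int connect_msc_fd(const char *ip, uint16_t port)
{
	/* returns a connected (or still connecting) socket fd, negative on error */
	return osmo_sock_init(AF_INET, SOCK_STREAM, IPPROTO_TCP, ip, port,
			      OSMO_SOCK_F_CONNECT | OSMO_SOCK_F_NONBLOCK);
}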