Example #1
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;
	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i)))
		return -ENOEXEC;

	/* Check whether we need cmpset: if the service is MT safe or
	 * <= 1 core is mapped, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
				(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
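
The cmpset here acts as a try-lock: rte_atomic32_cmpset(&lock, 0, 1) succeeds for exactly one thread, which runs the callback and then clears the lock. For reference, a minimal sketch of the same pattern in standard C11 atomics; the names (execute_lock, try_run_once) are illustrative only, not part of the DPDK API.

#include <stdatomic.h>

static atomic_uint execute_lock; /* 0 = free, 1 = held */

/* Try-lock sketch: the C11 equivalent of
 * rte_atomic32_cmpset(&lock, 0, 1) / rte_atomic32_clear(). */
static int try_run_once(void (*work)(void))
{
	unsigned int expected = 0;

	/* Swap 0 -> 1; fails if another thread already holds the lock. */
	if (!atomic_compare_exchange_strong(&execute_lock, &expected, 1))
		return -1; /* the -EBUSY case above */

	work();

	/* Release the lock so another caller can run the service. */
	atomic_store(&execute_lock, 0);
	return 0;
}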
Example #2
/*
 * Write scattered channel packet to TX bufring.
 *
 * The offset of this channel packet is written as a 64-bit value
 * immediately after the packet itself.
 *
 * The write goes through three stages:
 *  1. Reserve space in ring buffer for the new data.
 *     Writer atomically moves priv_write_index.
 *  2. Copy the new data into the ring.
 *  3. Update the tail of the ring (visible to the host), which
 *     indicates the next read location. Writer updates write_index.
 */
int
vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
		 bool *need_sig)
{
	struct vmbus_bufring *vbr = tbr->vbr;
	uint32_t ring_size = tbr->dsize;
	uint32_t old_windex, next_windex, windex, total;
	uint64_t save_windex;
	int i;

	total = 0;
	for (i = 0; i < iovlen; i++)
		total += iov[i].iov_len;
	total += sizeof(save_windex);

	/* Reserve space in ring */
	do {
		uint32_t avail;

		/* Get current free location */
		old_windex = tbr->windex;

		/* Prevent compiler reordering this with calculation */
		rte_compiler_barrier();

		avail = vmbus_br_availwrite(tbr, old_windex);

		/* If not enough space in ring, then tell caller. */
		if (avail <= total)
			return -EAGAIN;

		next_windex = vmbus_br_idxinc(old_windex, total, ring_size);

		/* Atomic update of next write_index for other threads */
	} while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));

	/* Space from old..new is now reserved */
	windex = old_windex;
	for (i = 0; i < iovlen; i++) {
		windex = vmbus_txbr_copyto(tbr, windex,
					   iov[i].iov_base, iov[i].iov_len);
	}

	/* Set the offset of the current channel packet. */
	save_windex = ((uint64_t)old_windex) << 32;
	windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
				   sizeof(save_windex));

	/* The region reserved should match the region used */
	RTE_ASSERT(windex == next_windex);

	/* Ensure that data is available before updating host index */
	rte_smp_wmb();

	/* Check in our reservation; wait for our turn to update the host */
	while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
		rte_pause();

	/* If the host had read all data before this, we need to signal */
	*need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
	return 0;
}
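
The three-stage protocol above (reserve a region with a CAS loop, copy into it, then publish in reservation order) reduces to the following sketch in C11 atomics. This is a simplification for illustration, not the vmbus implementation: the struct layout and the name ring_write are hypothetical, the availability check is omitted, and wraparound of the copy is not handled.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE 4096 /* power of two, assumed */

struct ring {
	atomic_uint priv_windex; /* reservation index (writers only) */
	atomic_uint windex;      /* published index, visible to the reader */
	uint8_t data[RING_SIZE];
};

static int ring_write(struct ring *r, const void *buf, unsigned int len)
{
	unsigned int old, next;

	/* Stage 1: reserve len bytes by advancing priv_windex with CAS.
	 * A real implementation would check available space here. */
	do {
		old = atomic_load(&r->priv_windex);
		next = (old + len) & (RING_SIZE - 1);
	} while (!atomic_compare_exchange_weak(&r->priv_windex, &old, next));

	/* Stage 2: copy into the reserved region (wraparound omitted). */
	memcpy(&r->data[old], buf, len);

	/* Stage 3: publish in reservation order. Spin until windex reaches
	 * our reserved start, then advance it past our data; release
	 * ordering makes the copy visible before the index update, playing
	 * the role of rte_smp_wmb() above. */
	unsigned int expect = old;
	while (!atomic_compare_exchange_weak_explicit(&r->windex, &expect,
			next, memory_order_release, memory_order_relaxed))
		expect = old; /* CAS overwrote expect on failure; reset */

	return 0;
}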
Example #3
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
	uint64_t core_mask;

	if (chan_info == NULL)
		return -1;

	if (rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_CONNECTED,
			CHANNEL_MGR_CHANNEL_PROCESSING) == 0)
		return -1;

	if (pkt->command == CPU_POWER) {
		core_mask = get_pcpus_mask(chan_info, pkt->resource_id);
		if (core_mask == 0) {
			RTE_LOG(ERR, CHANNEL_MONITOR, "Error getting physical CPU mask for "
				"channel '%s' using vCPU(%u)\n", chan_info->channel_path,
				(unsigned)pkt->unit);
			return -1;
		}
		if (__builtin_popcountll(core_mask) == 1) {
			unsigned int core_num = __builtin_ffsll(core_mask) - 1;

			switch (pkt->unit) {
			case CPU_POWER_SCALE_MIN:
				power_manager_scale_core_min(core_num);
				break;
			case CPU_POWER_SCALE_MAX:
				power_manager_scale_core_max(core_num);
				break;
			case CPU_POWER_SCALE_DOWN:
				power_manager_scale_core_down(core_num);
				break;
			case CPU_POWER_SCALE_UP:
				power_manager_scale_core_up(core_num);
				break;
			default:
				break;
			}
		} else {
			switch (pkt->unit) {
			case CPU_POWER_SCALE_MIN:
				power_manager_scale_mask_min(core_mask);
				break;
			case CPU_POWER_SCALE_MAX:
				power_manager_scale_mask_max(core_mask);
				break;
			case CPU_POWER_SCALE_DOWN:
				power_manager_scale_mask_down(core_mask);
				break;
			case CPU_POWER_SCALE_UP:
				power_manager_scale_mask_up(core_mask);
				break;
			default:
				break;
			}
		}
	}
	/* The return value is not checked, as the channel status may have
	 * been set to DISABLED by the management thread.
	 */
	rte_atomic32_cmpset(&(chan_info->status), CHANNEL_MGR_CHANNEL_PROCESSING,
			CHANNEL_MGR_CHANNEL_CONNECTED);
	return 0;
}
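
Here the compare-and-set is used not as a lock but as a state-transition guard: only a thread that observes CONNECTED may move the channel to PROCESSING, and the reverse transition deliberately tolerates interference from the management thread. A minimal C11 sketch of that pattern; the enum and function names are illustrative, not from the example.

#include <stdatomic.h>

enum chan_state { ST_CONNECTED, ST_PROCESSING, ST_DISABLED };

static _Atomic int status = ST_CONNECTED;

/* Claim the channel: CONNECTED -> PROCESSING. Fails if another
 * thread is already processing it (the first cmpset above). */
static int claim(void)
{
	int expected = ST_CONNECTED;
	return atomic_compare_exchange_strong(&status, &expected,
			ST_PROCESSING);
}

/* Release: PROCESSING -> CONNECTED. The result is deliberately
 * ignored, since the management thread may have moved the channel
 * to DISABLED in the meantime (the second cmpset above). */
static void release(void)
{
	int expected = ST_PROCESSING;
	(void)atomic_compare_exchange_strong(&status, &expected,
			ST_CONNECTED);
}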