Example #1
static void poll_noc_rx_buffer(int pcie_eth_if, uint32_t c2h_q)
{
	mppa_pcie_noc_rx_buf_t *bufs[MPPA_PCIE_MULTIBUF_BURST], *buf;
	int ret = 0, buf_idx, pkt_idx, count;
	struct mpodp_if_config *cfg = netdev_get_eth_if_config(pcie_eth_if);
	struct mpodp_c2h_entry free_pkt;
	int nb_bufs;
	int do_it = 1;
	if (netdev_c2h_is_full(cfg, c2h_q)) {
		dbg_printf("PCIe eth tx is full !!!\n");
		return;
	}

	nb_bufs = buffer_ring_get_multi(&g_full_buf_pool[pcie_eth_if][c2h_q], bufs,
					MPPA_PCIE_MULTIBUF_BURST, NULL);
	if (nb_bufs == 0)
		return;
	assert(nb_bufs <= MPPA_PCIE_MULTIBUF_COUNT);

	dbg_printf("%d buffer ready to be sent\n", nb_bufs);
	for (buf_idx = 0, count = 0; buf_idx < nb_bufs; buf_idx++) {
		buf = bufs[buf_idx];

		for (pkt_idx = 0; pkt_idx < buf->pkt_count; ++pkt_idx) {
			/* Only request a host interrupt every IT_BURSTINESS packets,
			 * and skip the request when cfg->interrupt_status is clear */
			count++;
			do_it = (count % IT_BURSTINESS == 0);
			if (do_it && !__builtin_k1_lwu(&cfg->interrupt_status))
				do_it = 0;
			/* Retry until the packet descriptor is accepted by the C2H queue */
			do {
				ret = netdev_c2h_enqueue_data(cfg, c2h_q, &buf->pkts[pkt_idx], &free_pkt,
							      do_it);
			} while (ret < 0);

			/* If enqueuing released a previously used buffer, return it to the free pool */
			if (free_pkt.data)
				buffer_ring_push_multi(&g_free_buf_pool,
						       (mppa_pcie_noc_rx_buf_t **)(uintptr_t)&free_pkt.data,
						       1, NULL);

		}

		pkt_count[pcie_eth_if] += buf->pkt_count;
		dbg_printf("%d packets handled, total %llu\n", buf->pkt_count, pkt_count[pcie_eth_if]);
	}

	if (!do_it) {
		/* Last data push did not trigger an IT, flush if required */
		if (__builtin_k1_lwu(&cfg->interrupt_status))
			mppa_pcie_send_it_to_host();
	}
}
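
The polling routine above is meant to be called repeatedly for every PCIe ethernet interface and C2H queue. The minimal driving loop below is a sketch only: MPODP_NB_IF and MPODP_NB_C2H_Q are illustrative bounds that do not appear in the example, and the sweep order is an assumption.

/* Sketch only: MPODP_NB_IF and MPODP_NB_C2H_Q are hypothetical bounds,
 * not identifiers taken from the project. */
#define MPODP_NB_IF	2
#define MPODP_NB_C2H_Q	2

static void poll_all_noc_rx_buffers(void)
{
	int eth_if;
	uint32_t c2h_q;

	/* Sweep every interface and every C2H queue on each polling pass */
	for (eth_if = 0; eth_if < MPODP_NB_IF; eth_if++)
		for (c2h_q = 0; c2h_q < MPODP_NB_C2H_Q; c2h_q++)
			poll_noc_rx_buffer(eth_if, c2h_q);
}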
Example #2
File: rpc.c Project: mitra/odp-mppa
static inline int pcie_add_forward(unsigned int pcie_eth_if_id,
								   struct mppa_pcie_eth_dnoc_tx_cfg *dnoc_tx_cfg)
{
	struct mppa_pcie_eth_if_config * cfg = netdev_get_eth_if_config(pcie_eth_if_id);
	struct mppa_pcie_eth_h2c_ring_buff_entry entry;

	entry.len = dnoc_tx_cfg->mtu;
	entry.pkt_addr = (uint32_t)dnoc_tx_cfg->fifo_addr;
	entry.flags = MPPA_PCIE_ETH_NEED_PKT_HDR;

	return netdev_h2c_enqueue_buffer(cfg, &entry);
}
Example #3
static inline int pcie_add_forward(struct mppa_pcie_eth_dnoc_tx_cfg *dnoc_tx_cfg,
				   mppa_rpc_odp_answer_t *answer)
{
	struct mpodp_if_config * cfg =
		netdev_get_eth_if_config(dnoc_tx_cfg->pcie_eth_if);
	struct mpodp_h2c_entry entry;

	entry.pkt_addr = (uint32_t)dnoc_tx_cfg->fifo_addr;

	if (netdev_h2c_enqueue_buffer(cfg, dnoc_tx_cfg->h2c_q, &entry)) {
		PCIE_RPC_ERR_MSG(answer,
				 "Failed to register cluster to pcie interface %d\n",
				 dnoc_tx_cfg->pcie_eth_if);
		return -1;
	}
	return 0;
}
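
Examples #2 and #3 are two revisions of the same helper: the newer one takes the H2C queue index from the tx_cfg, drops the explicit length and flags, and reports failures through the RPC answer rather than only the return code. The caller sketch below assumes a hypothetical cfg_of_cluster() lookup for the per-cluster DNoC TX configuration; in the project itself such a tx_cfg is handled inside the pcie_open RPC handler shown in Example #4.

/* Sketch only: cfg_of_cluster() is a hypothetical helper, not part of the project. */
static void forward_cluster(unsigned int clus_id, mppa_rpc_odp_answer_t *answer)
{
	struct mppa_pcie_eth_dnoc_tx_cfg *tx_cfg = cfg_of_cluster(clus_id);

	if (pcie_add_forward(tx_cfg, answer) < 0)
		return;	/* the error message has already been filled into *answer */

	/* On success, packets arriving on this DNoC TX FIFO are forwarded to the host */
}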
Example #4
		ret = mppa_noc_cnoc_tx_alloc_auto(if_id, &tag, MPPA_NOC_BLOCKING);
		assert(ret == MPPA_NOC_RET_SUCCESS);
		cnoc_tx_tags[if_id] = tag;
	}
	return cnoc_tx_tags[if_id];
}

static void pcie_open(unsigned remoteClus, mppa_rpc_odp_t * msg,
		      mppa_rpc_odp_answer_t *answer)
{
	mppa_rpc_odp_cmd_pcie_open_t open_cmd = {.inl_data = msg->inl_data};
	struct mppa_pcie_eth_dnoc_tx_cfg *tx_cfg;
	int if_id = remoteClus % MPPA_PCIE_USABLE_DNOC_IF;
	unsigned int tx_id;
	const struct mpodp_if_config * cfg =
		netdev_get_eth_if_config(open_cmd.pcie_eth_if_id);

	dbg_printf("Received request to open PCIe\n");
	if (!netdev_initialized) {
		if (netdev_start()) {
			PCIE_RPC_ERR_MSG(answer, "Failed to initialize netdevs\n");
			return;
		}
		netdev_initialized = 1;
	}

	if (open_cmd.pkt_size < cfg->mtu) {
		PCIE_RPC_ERR_MSG(answer, "Cluster MTU %d is smaller than PCI MTU %d\n",
				 open_cmd.pkt_size, cfg->mtu);
		return;
	}