/**
 * platform_create_rpmsg_vdev() - create and initialize an RPMsg virtio
 * device on top of the platform's remoteproc instance.
 *
 * @platform:   platform handle (a struct remoteproc pointer).
 * @vdev_index: index of the virtio device within the remoteproc.
 * @role:       VIRTIO_DEV_MASTER or VIRTIO_DEV_SLAVE.
 * @rst_cb:     virtio device reset callback (may be NULL).
 * @ns_bind_cb: RPMsg name-service bind callback (may be NULL).
 *
 * Return: pointer to the created RPMsg device, or NULL on failure.
 *         On failure all intermediate resources are released.
 */
struct rpmsg_device *
platform_create_rpmsg_vdev(void *platform, unsigned int vdev_index,
			   unsigned int role,
			   void (*rst_cb)(struct virtio_device *vdev),
			   rpmsg_ns_bind_cb ns_bind_cb)
{
	struct remoteproc *rproc = platform;
	struct rpmsg_virtio_device *rpmsg_vdev;
	struct virtio_device *vdev;
	void *shbuf;
	struct metal_io_region *shbuf_io;
	int ret;

	rpmsg_vdev = metal_allocate_memory(sizeof(*rpmsg_vdev));
	if (!rpmsg_vdev)
		return NULL;
	shbuf_io = remoteproc_get_io_with_pa(rproc, SHARED_MEM_PA);
	if (!shbuf_io)
		goto err1; /* fix: was returning NULL and leaking rpmsg_vdev */
	shbuf = metal_io_phys_to_virt(shbuf_io,
				      SHARED_MEM_PA + SHARED_BUF_OFFSET);

	xil_printf("creating remoteproc virtio\r\n");
	/* TODO: can we have a wrapper for the following two functions? */
	vdev = remoteproc_create_virtio(rproc, vdev_index, role, rst_cb);
	if (!vdev) {
		xil_printf("failed remoteproc_create_virtio\r\n");
		goto err1;
	}

	xil_printf("initializing rpmsg vdev\r\n");
	if (role == VIRTIO_DEV_MASTER) {
		/* Only RPMsg virtio master needs to initialize the
		 * shared buffers pool
		 */
		rpmsg_virtio_init_shm_pool(&shpool, shbuf,
					   (SHARED_MEM_SIZE -
					    SHARED_BUF_OFFSET));
		ret = rpmsg_init_vdev(rpmsg_vdev, vdev, ns_bind_cb,
				      shbuf_io, &shpool);
	} else {
		/* RPMsg virtio slave can set shared buffers pool
		 * argument to NULL
		 */
		ret = rpmsg_init_vdev(rpmsg_vdev, vdev, ns_bind_cb,
				      shbuf_io, NULL);
	}
	if (ret) {
		xil_printf("failed rpmsg_init_vdev\r\n");
		goto err2;
	}
	xil_printf("initializing rpmsg vdev\r\n");
	return rpmsg_virtio_get_rpmsg_device(rpmsg_vdev);
err2:
	remoteproc_remove_virtio(rproc, vdev);
err1:
	metal_free_memory(rpmsg_vdev);
	return NULL;
}
/**
 * metal_cntr_irq_attach() - attach or detach a libmetal IRQ handler.
 *
 * With a non-NULL handler @hd, a struct metal_irq wrapper holding the
 * handler and its argument is allocated and the shared dispatcher
 * metal_cntr_irq_handler is attached to @irq with that wrapper as data.
 * With a NULL @hd, detachment is performed by dispatching a fake IRQ
 * with NULL data while interrupts are masked.
 *
 * NOTE(review): the return value of irq_attach() is not checked here;
 * if it can fail, the wrapper allocation would leak — confirm against
 * the irq_attach() contract.
 *
 * Return: 0 on success, -EINVAL for an out-of-range @irq,
 *	   -ENOMEM when the wrapper allocation fails.
 */
static int metal_cntr_irq_attach(struct metal_irq_controller *cntr,
				 int irq, metal_irq_handler hd, void *arg)
{
	struct metal_irq *wrapper;
	unsigned int irq_flags;

	if (irq < 0 || irq >= cntr->irq_num)
		return -EINVAL;

	if (!hd) {
		/* Detach: fake an IRQ request with NULL data while masked */
		irq_flags = metal_irq_save_disable();
		irq_dispatch(irq, NULL);
		metal_irq_restore_enable(irq_flags);
		return 0;
	}

	wrapper = metal_allocate_memory(sizeof(*wrapper));
	if (wrapper == NULL)
		return -ENOMEM;
	wrapper->hd = hd;
	wrapper->arg = arg;
	irq_attach(irq, metal_cntr_irq_handler, wrapper);
	return 0;
}
/**
 * alloc_thread() - worker body for the allocator thread-safety stress test.
 *
 * Allocates and immediately frees a 256-byte buffer, test_count times,
 * exercising the allocator concurrently from multiple threads (the
 * implementation is expected to be thread safe).
 *
 * @arg: unused thread argument.
 *
 * Return: NULL on success, (void *)-ENOMEM if any allocation failed.
 */
static void *alloc_thread(void *arg)
{
	int i;
	void *ptr;
	void *rv = 0;

	(void)arg;
	for (i = 0; i < test_count; i++) {
		/* expecting the implementation to be thread safe */
		ptr = metal_allocate_memory(256 /*10*i*/);
		if (!ptr) {
			/* fix: log message typo "memmory" corrected */
			metal_log(METAL_LOG_DEBUG,
				  "failed to allocate memory\n");
			rv = (void *)-ENOMEM;
			break;
		}
		metal_free_memory(ptr);
	}
	return rv;
}
/**
 * @brief measure_shmem_throughputd() - measure shared-memory throughput
 *        with libmetal
 * - Download throughput measurement:
 *   Start the TTC RPU counter, wait for an IPI kick, check if data is
 *   available and, if so, read as much data as possible from shared
 *   memory. It iterates until all expected packages (TOTAL_DATA_SIZE
 *   divided by the package size) have been received, then stops the
 *   TTC RPU counter and kicks the IPI to notify the remote. Repeated
 *   for each package size.
 * - Upload throughput measurement:
 *   Start the TTC RPU counter, write data to shared memory and kick
 *   the IPI to notify the remote, once per package. Stop the TTC RPU
 *   counter, wait for the APU IPI kick indicating the APU has received
 *   all the packages, then kick the IPI to signal the TTC RPU counter
 *   value is ready to read. Repeated for each package size.
 *
 * @param[in] ch - channel information
 * @return - 0 on success, error code if failure.
 */
static int measure_shmem_throughputd(struct channel_s *ch)
{
	void *lbuf = NULL;
	int ret = 0;
	size_t s;
	uint32_t rx_count, rx_avail, tx_count, iterations;
	unsigned long tx_avail_offset, rx_avail_offset;
	unsigned long tx_addr_offset, rx_addr_offset;
	unsigned long tx_data_offset, rx_data_offset;
	uint32_t buf_phy_addr_32;

	/* allocate memory for receiving data */
	lbuf = metal_allocate_memory(BUF_SIZE_MAX);
	if (!lbuf) {
		LPERROR("Failed to allocate memory.\r\n");
		return -1;
	}
	/* fill the local buffer with a known pattern for the tx phase */
	memset(lbuf, 0xA, BUF_SIZE_MAX);

	/* Clear shared memory */
	metal_io_block_set(ch->shm_io, 0, 0,
			   metal_io_region_size(ch->shm_io));
	LPRINTF("Starting shared mem throughput demo\n");

	/* for each data size, measure block receive throughput */
	for (s = PKG_SIZE_MIN; s <= PKG_SIZE_MAX; s <<= 1) {
		rx_count = 0;
		iterations = TOTAL_DATA_SIZE / s;
		/* Set rx buffer address offsets into the shared region */
		rx_avail_offset = SHM_DESC_OFFSET_RX + SHM_DESC_AVAIL_OFFSET;
		rx_addr_offset = SHM_DESC_OFFSET_RX +
				 SHM_DESC_ADDR_ARRAY_OFFSET;
		rx_data_offset = SHM_DESC_OFFSET_RX + SHM_BUFF_OFFSET_RX;
		wait_for_notified(&ch->remote_nkicked);
		/* Data has arrived, measurement starts:
		 * reset the RPU TTC counter.
		 */
		reset_timer(ch->ttc_io, TTC_CNT_RPU_TO_APU);
		while (1) {
			/* number of buffers the remote has published */
			rx_avail = metal_io_read32(ch->shm_io,
						   rx_avail_offset);
			while (rx_count != rx_avail) {
				/* Get the buffer location from the shared
				 * memory rx address array.
				 */
				buf_phy_addr_32 = metal_io_read32(ch->shm_io,
							rx_addr_offset);
				rx_data_offset = metal_io_phys_to_offset(
					ch->shm_io,
					(metal_phys_addr_t)buf_phy_addr_32);
				if (rx_data_offset == METAL_BAD_OFFSET) {
					LPERROR(
					"[%u]failed to get rx offset: 0x%x, 0x%lx.\n",
						rx_count, buf_phy_addr_32,
						metal_io_phys(ch->shm_io,
							rx_addr_offset));
					ret = -EINVAL;
					goto out;
				}
				rx_addr_offset += sizeof(buf_phy_addr_32);
				/* Read data from shared memory */
				metal_io_block_read(ch->shm_io,
						    rx_data_offset, lbuf, s);
				rx_count++;
			}
			if (rx_count < iterations)
				/* Need to wait for more data */
				wait_for_notified(&ch->remote_nkicked);
			else
				break;
		}
		/* Stop RPU TTC counter */
		stop_timer(ch->ttc_io, TTC_CNT_RPU_TO_APU);
		/* Clear remote kicked flag -- 0 is kicked */
		atomic_init(&ch->remote_nkicked, 1);
		/* Kick IPI to notify the RPU TTC counter value is ready */
		metal_io_write32(ch->ipi_io, IPI_TRIG_OFFSET, ch->ipi_mask);
	}

	/* for each data size, measure send throughput */
	for (s = PKG_SIZE_MIN; s <= PKG_SIZE_MAX; s <<= 1) {
		tx_count = 0;
		iterations = TOTAL_DATA_SIZE / s;
		/* Set tx buffer address offsets into the shared region */
		tx_avail_offset = SHM_DESC_OFFSET_TX + SHM_DESC_AVAIL_OFFSET;
		tx_addr_offset = SHM_DESC_OFFSET_TX +
				 SHM_DESC_ADDR_ARRAY_OFFSET;
		tx_data_offset = SHM_DESC_OFFSET_TX + SHM_BUFF_OFFSET_TX;
		/* Wait for APU to signal it is ready for the measurement */
		wait_for_notified(&ch->remote_nkicked);
		/* Remote is ready, measurement starts:
		 * reset the RPU TTC counter.
		 */
		reset_timer(ch->ttc_io, TTC_CNT_RPU_TO_APU);
		while (tx_count < iterations) {
			/* Write data to the shared memory */
			metal_io_block_write(ch->shm_io, tx_data_offset,
					     lbuf, s);
			/* Write to the address array to tell the other end
			 * the buffer address.
			 */
			buf_phy_addr_32 = (uint32_t)metal_io_phys(ch->shm_io,
							tx_data_offset);
			metal_io_write32(ch->shm_io, tx_addr_offset,
					 buf_phy_addr_32);
			tx_data_offset += s;
			tx_addr_offset += sizeof(buf_phy_addr_32);
			/* Increase number of available buffers */
			tx_count++;
			metal_io_write32(ch->shm_io, tx_avail_offset,
					 tx_count);
			/* Kick IPI to notify remote data is ready in the
			 * shared memory
			 */
			metal_io_write32(ch->ipi_io, IPI_TRIG_OFFSET,
					 ch->ipi_mask);
		}
		/* Stop RPU TTC counter */
		stop_timer(ch->ttc_io, TTC_CNT_RPU_TO_APU);
		/* Wait for IPI kick to know when the remote is ready
		 * to read the TTC counter value
		 */
		wait_for_notified(&ch->remote_nkicked);
		/* Kick IPI to notify the RPU TTC counter value is ready */
		metal_io_write32(ch->ipi_io, IPI_TRIG_OFFSET, ch->ipi_mask);
	}

out:
	if (lbuf)
		metal_free_memory(lbuf);
	return ret;
}
/** * @brief ipi_shmem_echo() - shared memory IPI demo * This task will: * * Get the timestamp and put it into the ping shared memory * * Update the shared memory descriptor for the new available * ping buffer. * * Trigger IPI to notifty the remote. * * Repeat the above steps until it sends out all the packages. * * Monitor IPI interrupt, verify every received package. * * After all the packages are received, it sends out shutdown * message to the remote. * * @param[in] ipi_io - IPI metal i/o region * @param[in] shm_io - shared memory metal i/o region * @return - return 0 on success, otherwise return error number indicating * type of error. */ static int ipi_shmem_echo(struct metal_io_region *ipi_io, struct metal_io_region *shm_io) { int ret; uint32_t i; uint32_t rx_avail; unsigned long tx_avail_offset, rx_avail_offset; unsigned long rx_used_offset; unsigned long tx_addr_offset, rx_addr_offset; unsigned long tx_data_offset, rx_data_offset; unsigned long long tstart, tend; long long tdiff; long long tdiff_avg_s = 0, tdiff_avg_ns = 0; void *txbuf = NULL, *rxbuf = NULL, *tmpptr; struct msg_hdr_s *msg_hdr; uint32_t ipi_mask = IPI_MASK; uint32_t tx_phy_addr_32; txbuf = metal_allocate_memory(BUF_SIZE_MAX); if (!txbuf) { LPERROR("Failed to allocate local tx buffer for msg.\n"); ret = -ENOMEM; goto out; } rxbuf = metal_allocate_memory(BUF_SIZE_MAX); if (!rxbuf) { LPERROR("Failed to allocate local rx buffer for msg.\n"); ret = -ENOMEM; goto out; } /* Clear shared memory */ metal_io_block_set(shm_io, 0, 0, metal_io_region_size(shm_io)); /* Set tx/rx buffer address offset */ tx_avail_offset = SHM_DESC_OFFSET_TX + SHM_DESC_AVAIL_OFFSET; rx_avail_offset = SHM_DESC_OFFSET_RX + SHM_DESC_AVAIL_OFFSET; rx_used_offset = SHM_DESC_OFFSET_RX + SHM_DESC_USED_OFFSET; tx_addr_offset = SHM_DESC_OFFSET_TX + SHM_DESC_ADDR_ARRAY_OFFSET; rx_addr_offset = SHM_DESC_OFFSET_RX + SHM_DESC_ADDR_ARRAY_OFFSET; tx_data_offset = SHM_DESC_OFFSET_TX + SHM_BUFF_OFFSET_TX; rx_data_offset = 
SHM_DESC_OFFSET_RX + SHM_BUFF_OFFSET_RX; LPRINTF("Start echo flood testing....\n"); LPRINTF("Sending msgs to the remote.\n"); for (i = 0; i < PKGS_TOTAL; i++) { /* Construct a message to send */ tmpptr = txbuf; msg_hdr = tmpptr; msg_hdr->index = i; msg_hdr->len = sizeof(tstart); tmpptr += sizeof(struct msg_hdr_s); tstart = get_timestamp(); *(unsigned long long *)tmpptr = tstart; /* copy message to shared buffer */ metal_io_block_write(shm_io, tx_data_offset, msg_hdr, sizeof(struct msg_hdr_s) + msg_hdr->len); /* Write to the address array to tell the other end * the buffer address. */ tx_phy_addr_32 = (uint32_t)metal_io_phys(shm_io, tx_data_offset); metal_io_write32(shm_io, tx_addr_offset, tx_phy_addr_32); tx_data_offset += sizeof(struct msg_hdr_s) + msg_hdr->len; tx_addr_offset += sizeof(uint32_t); /* Increase number of available buffers */ metal_io_write32(shm_io, tx_avail_offset, (i + 1)); /* Kick IPI to notify data has been put to shared buffer */ metal_io_write32(ipi_io, IPI_TRIG_OFFSET, ipi_mask); } LPRINTF("Waiting for messages to echo back and verify.\n"); i = 0; tx_data_offset = SHM_DESC_OFFSET_TX + SHM_BUFF_OFFSET_TX; while (i != PKGS_TOTAL) { wait_for_notified(&remote_nkicked); rx_avail = metal_io_read32(shm_io, rx_avail_offset); while (i != rx_avail) { uint32_t rx_phy_addr_32; /* Received pong from the other side */ /* Get the buffer location from the shared memory * rx address array. 
*/ rx_phy_addr_32 = metal_io_read32(shm_io, rx_addr_offset); rx_data_offset = metal_io_phys_to_offset(shm_io, (metal_phys_addr_t)rx_phy_addr_32); if (rx_data_offset == METAL_BAD_OFFSET) { LPERROR("failed to get rx [%d] offset: 0x%x.\n", i, rx_phy_addr_32); ret = -EINVAL; goto out; } rx_addr_offset += sizeof(rx_phy_addr_32); /* Read message header from shared memory */ metal_io_block_read(shm_io, rx_data_offset, rxbuf, sizeof(struct msg_hdr_s)); msg_hdr = (struct msg_hdr_s *)rxbuf; /* Check if the message header is valid */ if (msg_hdr->index != (uint32_t)i) { LPERROR("wrong msg: expected: %d, actual: %d\n", i, msg_hdr->index); ret = -EINVAL; goto out; } if (msg_hdr->len != sizeof(tstart)) { LPERROR("wrong msg: length invalid: %lu, %u.\n", sizeof(tstart), msg_hdr->len); ret = -EINVAL; goto out; } /* Read message */ rx_data_offset += sizeof(*msg_hdr); metal_io_block_read(shm_io, rx_data_offset, rxbuf + sizeof(*msg_hdr), msg_hdr->len); rx_data_offset += msg_hdr->len; /* increase rx used count to indicate it has consumed * the received data */ metal_io_write32(shm_io, rx_used_offset, (i + 1)); /* Verify message */ /* Get tx message previously sent*/ metal_io_block_read(shm_io, tx_data_offset, txbuf, sizeof(*msg_hdr) + sizeof(tstart)); tx_data_offset += sizeof(*msg_hdr) + sizeof(tstart); /* Compare the received message and the sent message */ ret = memcmp(rxbuf, txbuf, sizeof(*msg_hdr) + sizeof(tstart)); if (ret) { LPERROR("data[%u] verification failed.\n", i); LPRINTF("Expected:"); dump_buffer(txbuf, sizeof(*msg_hdr) + sizeof(tstart)); LPRINTF("Actual:"); dump_buffer(rxbuf, sizeof(*msg_hdr) + sizeof(tstart)); ret = -EINVAL; goto out; } i++; } } tend = get_timestamp(); tdiff = tend - tstart; /* Send shutdown message */ tmpptr = txbuf; msg_hdr = tmpptr; msg_hdr->index = i; msg_hdr->len = strlen(SHUTDOWN); tmpptr += sizeof(struct msg_hdr_s); sprintf(tmpptr, SHUTDOWN); /* copy message to shared buffer */ metal_io_block_write(shm_io, tx_data_offset, msg_hdr, sizeof(struct 
msg_hdr_s) + msg_hdr->len); tx_phy_addr_32 = (uint32_t)metal_io_phys(shm_io, tx_data_offset); metal_io_write32(shm_io, tx_addr_offset, tx_phy_addr_32); metal_io_write32(shm_io, tx_avail_offset, PKGS_TOTAL + 1); LPRINTF("Kick remote to notify shutdown message sent...\n"); metal_io_write32(ipi_io, IPI_TRIG_OFFSET, ipi_mask); tdiff /= PKGS_TOTAL; tdiff_avg_s = tdiff / NS_PER_S; tdiff_avg_ns = tdiff % NS_PER_S; LPRINTF("Total packages: %d, time_avg = %lds, %ldns\n", i, (long int)tdiff_avg_s, (long int)tdiff_avg_ns); ret = 0; out: if (txbuf) metal_free_memory(txbuf); if (rxbuf) metal_free_memory(rxbuf); return ret; }