static void mat_mul_demo(void)
{
    while (1) {
        void *data;
        int len;

        /* Wait for data... */
        buffer_pull(&data, &len);

        /* Process incoming message/data */
        if (*(int *)data == SHUTDOWN_MSG) {
            /* Disable interrupts and free resources */
            remoteproc_resource_deinit(proc);
            break;
        } else {
            env_memcpy(matrix_array, data, len);

            /* Multiply the received matrices. */
            Matrix_Multiply(&matrix_array[0], &matrix_array[1],
                            &matrix_result);

            /* Send the result back */
            if (RPMSG_SUCCESS != rpmsg_send(app_rp_chnl, &matrix_result,
                                            sizeof(matrix))) {
                xil_printf("Error: rpmsg_send failed\n");
            }
        }
    }
}
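Matrix_Multiply() itself is not shown here; a minimal sketch follows, assuming the demo's convention that a matrix carries its own dimension alongside a fixed-size element array (the matrix typedef and the MAX_SIZE bound are assumptions):

/* Assumed matrix type: dimension plus a fixed-size element array. */
#define MAX_SIZE 6

typedef struct _matrix {
    unsigned int size;
    unsigned int elements[MAX_SIZE][MAX_SIZE];
} matrix;

static void Matrix_Multiply(const matrix *m, const matrix *n, matrix *r)
{
    unsigned int i, j, k;

    /* Classic O(n^3) multiply; the result inherits the operand dimension. */
    r->size = m->size;
    for (i = 0; i < m->size; ++i) {
        for (j = 0; j < n->size; ++j) {
            r->elements[i][j] = 0;
            for (k = 0; k < m->size; ++k)
                r->elements[i][j] += m->elements[i][k] * n->elements[k][j];
        }
    }
}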
/**
 * platform_get_processor_info
 *
 * Copies the target info from the user-defined data structures to the
 * HIL proc data structure. In the case of remote contexts this function
 * is called with the reserved CPU ID HIL_RSVD_CPU_ID, because for
 * remotes there is only one master.
 *
 * @param proc   - HIL proc to populate
 * @param cpu_id - CPU ID
 *
 * @return - status of execution
 */
int platform_get_processor_info(struct hil_proc *proc, int cpu_id)
{
    int idx;

    for (idx = 0; idx < sizeof(proc_table) / sizeof(struct hil_proc); idx++) {
        if ((cpu_id == HIL_RSVD_CPU_ID) || (proc_table[idx].cpu_id == cpu_id)) {
            env_memcpy(proc, &proc_table[idx], sizeof(struct hil_proc));
            return 0;
        }
    }
    return -1;
}
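As a usage sketch, a remote context would populate its proc structure as below (the local variable and error handling are illustrative):

struct hil_proc proc;

/* Remote side: there is only one master, so pass the reserved CPU ID. */
if (platform_get_processor_info(&proc, HIL_RSVD_CPU_ID) != 0) {
    /* No matching entry in proc_table */
    xil_printf("Error: no proc_table entry for this CPU\n");
}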
static void mat_mul_demo(void *unused_arg)
{
    int status = 0;

    (void)unused_arg;

    /* Create buffer to pass data between the RPMSG callback and this task */
    buffer_create();

    /* Initialize HW and SW components/objects */
    init_system();

    /* Resource table needs to be provided to remoteproc_resource_init() */
    rsc_info.rsc_tab = (struct resource_table *)&resources;
    rsc_info.size = sizeof(resources);

    /* Initialize OpenAMP framework */
    status = remoteproc_resource_init(&rsc_info, rpmsg_channel_created,
                                      rpmsg_channel_deleted, rpmsg_read_cb,
                                      &proc);
    if (RPROC_SUCCESS != status) {
        xil_printf("Error: initializing OpenAMP framework\n");
        return;
    }

    /* Stay in the data-processing loop until we receive a 'shutdown' message */
    while (1) {
        void *data;
        int len;

        /* Wait for data... */
        buffer_pull(&data, &len);

        /* Process incoming message/data */
        if (*(int *)data == SHUTDOWN_MSG) {
            /* Disable interrupts and free resources */
            remoteproc_resource_deinit(proc);
            /* Terminate this task */
            vTaskDelete(NULL);
            break;
        } else {
            env_memcpy(matrix_array, data, len);

            /* Multiply the received matrices. */
            Matrix_Multiply(&matrix_array[0], &matrix_array[1],
                            &matrix_result);

            /* Send the result back */
            if (RPMSG_SUCCESS != rpmsg_send(app_rp_chnl, &matrix_result,
                                            sizeof(matrix))) {
                xil_printf("Error: rpmsg_send failed\n");
            }
        }
    }
}
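Under FreeRTOS the demo entry point only has to create this task and start the scheduler. A minimal sketch, with the stack depth and priority chosen purely for illustration:

int main(void)
{
    TaskHandle_t comm_task;

    /* Stack depth (in words) and priority are illustrative values. */
    if (xTaskCreate(mat_mul_demo, "mat_mul", 1024, NULL, 2,
                    &comm_task) != pdPASS) {
        xil_printf("Error: failed to create the processing task\n");
        return -1;
    }

    vTaskStartScheduler();

    /* Only reached if the scheduler fails to start. */
    return 0;
}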
/* Returns 0 if the buffer is full and the data was not saved */
int buffer_push(void *data, int len)
{
    /* Full? */
    if (((rb.head + 1) % RB_SZ) == rb.tail)
        return 0;

    /* Each slot stores the payload length in its first word,
     * followed by the payload itself. */
    env_memcpy(&rb.buffer[rb.head][sizeof(int)], data, len);
    *(int *)&rb.buffer[rb.head][0] = len;
    rb.head = (rb.head + 1) % RB_SZ;

    /* Notify a possibly waiting receiver */
    env_release_sync_lock(rb.sync_lock);
    return 1;
}
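The matching consumer, buffer_pull(), is not shown above; a minimal sketch follows, assuming the same slot layout (length word followed by payload) and that env_acquire_sync_lock() blocks until buffer_push() signals. Note the returned pointer stays valid only until the slot is reused:

/* Blocks until data is available, then hands out a pointer into the slot. */
void buffer_pull(void **data, int *len)
{
    /* Wait until the producer signals that a slot was filled. */
    while (rb.head == rb.tail)
        env_acquire_sync_lock(rb.sync_lock);

    *len = *(int *)&rb.buffer[rb.tail][0];
    *data = &rb.buffer[rb.tail][sizeof(int)];
    rb.tail = (rb.tail + 1) % RB_SZ;
}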
static void rpmsg_read_cb(struct rpmsg_channel *rp_chnl, void *data, int len,
                          void *priv, unsigned long src)
{
    if ((*(int *)data) == SHUTDOWN_MSG) {
        remoteproc_resource_deinit(proc);
    } else {
        env_memcpy(matrix_array, data, len);

        /* Multiply the received matrices. */
        Matrix_Multiply(&matrix_array[0], &matrix_array[1], &matrix_result);

        /* Send the result of the matrix multiplication back to the master. */
        rpmsg_send(app_rp_chnl, &matrix_result, sizeof(matrix));
    }
}
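The channel lifecycle callbacks passed to remoteproc_resource_init() mainly record the channel handle that rpmsg_send() uses; a minimal sketch, assuming the app_rp_chnl global used above and a hypothetical rp_ept global for the receive endpoint:

static void rpmsg_channel_created(struct rpmsg_channel *rp_chnl)
{
    /* Remember the channel and create an endpoint for receiving data. */
    app_rp_chnl = rp_chnl;
    rp_ept = rpmsg_create_ept(rp_chnl, rpmsg_read_cb, RPMSG_NULL,
                              RPMSG_ADDR_ANY);
}

static void rpmsg_channel_deleted(struct rpmsg_channel *rp_chnl)
{
    rpmsg_destroy_ept(rp_ept);
    app_rp_chnl = NULL;
}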
void matrix_mul(void)
{
#ifdef USE_FREERTOS
    for (;;) {
        if (xQueueReceive(mat_mul_queue, &mat_array, portMAX_DELAY)) {
            env_memcpy(matrix_array, mat_array.data, mat_array.length);

            /* Multiply the received matrices. */
            Matrix_Multiply(&matrix_array[0], &matrix_array[1],
                            &matrix_result);

            mat_result_array.length = sizeof(matrix);
            mat_result_array.data = &matrix_result;
            xQueueSend(OpenAMPInstPtr.send_queue, &mat_result_array,
                       portMAX_DELAY);
        }
    }
#else
    if (pq_qlength(mat_mul_queue) > 0) {
        mat_array = pq_dequeue(mat_mul_queue);
        env_memcpy(matrix_array, mat_array->data, mat_array->length);

        /* Multiply the received matrices. */
        Matrix_Multiply(&matrix_array[0], &matrix_array[1], &matrix_result);

        mat_result_array.length = sizeof(matrix);
        mat_result_array.data = &matrix_result;
        pq_enqueue(OpenAMPInstPtr.send_queue, &mat_result_array);
    }
#endif
}
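For the FreeRTOS path above, the queues carry small descriptors with the .data/.length fields that matrix_mul() dereferences; a sketch of the setup, where the descriptor type, its name, and the queue depth are all assumptions for illustration:

/* Hypothetical descriptor type matching the .data/.length accesses above. */
struct rpmsg_msg {
    void *data;
    int length;
};

static struct rpmsg_msg mat_array;
static struct rpmsg_msg mat_result_array;
static QueueHandle_t mat_mul_queue;

void matrix_mul_init(void)
{
    /* A depth of 1 suffices for strictly request/response traffic. */
    mat_mul_queue = xQueueCreate(1, sizeof(struct rpmsg_msg));
}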
int _boot_cpu(int cpu_id, unsigned int load_addr)
{
    unsigned int reg;
    unsigned int tramp_size;
    unsigned int tramp_addr = 0;

    if (load_addr) {
        tramp_size = &zynq_trampoline_end - &zynq_trampoline;
        if ((load_addr < tramp_size) || (load_addr & 0x3)) {
            return -1;
        }

        tramp_size = &zynq_trampoline_jump - &zynq_trampoline;

        /*
         * The trampoline code is copied to address 0, from where the remote
         * core is expected to fetch its first instruction after reset. If the
         * master is using address 0, this memcpy will corrupt the system; it
         * is the user's responsibility not to copy the trampoline code in
         * such cases.
         */
        env_memcpy((char *)tramp_addr, &zynq_trampoline, tramp_size);

        /* Write the image address into the word reserved at the trampoline end */
        HIL_MEM_WRITE32((char *)(tramp_addr + tramp_size), load_addr);
    }

    unlock_slcr();

    /* Ensure the core's clock is not stopped */
    reg = HIL_MEM_READ32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL);
    reg &= ~(A9_CPU_SLCR_CLK_STOP << cpu_id);
    HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL, reg);

    /* De-assert the reset signal and start the clock to start the core */
    reg &= ~(A9_CPU_SLCR_RST << cpu_id);
    HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + A9_CPU_SLCR_RESET_CTRL, reg);

    lock_slcr();

    return 0;
}
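The unlock_slcr()/lock_slcr() helpers gate access to the System Level Control Registers; a minimal sketch, assuming the Zynq-7000 register map (lock/unlock offsets and key values as documented in the TRM):

/* Zynq-7000 SLCR write-protection registers (offsets from the SLCR base). */
#define SLCR_LOCK_OFFSET    0x004
#define SLCR_UNLOCK_OFFSET  0x008
#define SLCR_LOCK_KEY       0x767B
#define SLCR_UNLOCK_KEY     0xDF0D

static void unlock_slcr(void)
{
    HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + SLCR_UNLOCK_OFFSET, SLCR_UNLOCK_KEY);
}

static void lock_slcr(void)
{
    HIL_MEM_WRITE32(ESAL_DP_SLCR_BASE + SLCR_LOCK_OFFSET, SLCR_LOCK_KEY);
}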
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rp_chnl, unsigned long src,
                              unsigned long dst, char *data, int size, int wait)
{
    struct remote_device *rdev;
    struct rpmsg_hdr *rp_hdr;
    void *buffer;
    int status = RPMSG_SUCCESS;
    unsigned short idx;
    int tick_count = 0;
    int buff_len;

    if (!rp_chnl) {
        return RPMSG_ERR_PARAM;
    }

    /* Get the remote device associated with the channel. */
    rdev = rp_chnl->rdev;

    /* Validate the device state. */
    if (rp_chnl->state != RPMSG_CHNL_STATE_ACTIVE
        || rdev->state != RPMSG_DEV_STATE_ACTIVE) {
        return RPMSG_ERR_DEV_STATE;
    }

    /* Lock the device for exclusive access to the virtqueues. */
    env_lock_mutex(rdev->lock);

    /* Get an rpmsg buffer for the outgoing message. */
    buffer = rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
    if (!buffer && !wait) {
        status = RPMSG_ERR_NO_MEM;
    }
    env_unlock_mutex(rdev->lock);

    if (status == RPMSG_SUCCESS) {
        while (!buffer) {
            /*
             * The wait parameter is true - poll for a buffer for up to
             * 15 seconds, as defined by the API.
             */
            env_sleep_msec(RPMSG_TICKS_PER_INTERVAL);
            env_lock_mutex(rdev->lock);
            buffer = rpmsg_get_tx_buffer(rdev, &buff_len, &idx);
            env_unlock_mutex(rdev->lock);
            tick_count += RPMSG_TICKS_PER_INTERVAL;
            if (tick_count >= (RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL)) {
                status = RPMSG_ERR_NO_BUFF;
                break;
            }
        }

        if (status == RPMSG_SUCCESS) {
            /* FIXME: maybe just copy as much data as the buffer can hold and Tx it. */
            if (size > (buff_len - sizeof(struct rpmsg_hdr)))
                status = RPMSG_ERR_BUFF_SIZE;

            if (status == RPMSG_SUCCESS) {
                rp_hdr = (struct rpmsg_hdr *)buffer;

                /* Initialize the RPMSG header. */
                rp_hdr->dst = dst;
                rp_hdr->src = src;
                rp_hdr->len = size;

                /* Copy the payload into the rpmsg buffer. */
                env_memcpy(rp_hdr->data, data, size);

                env_lock_mutex(rdev->lock);

                /* Enqueue the buffer on the virtqueue. */
                status = rpmsg_enqueue_buffer(rdev, buffer, buff_len, idx);
                if (status == RPMSG_SUCCESS) {
                    /* Let the other side know there is a job to process. */
                    virtqueue_kick(rdev->tvq);
                }
                env_unlock_mutex(rdev->lock);
            }
        }
    }

    /* Clean up in case of error; buffer may be NULL on the timeout paths. */
    if (status != RPMSG_SUCCESS && buffer) {
        rpmsg_free_buffer(rdev, buffer);
    }

    return status;
}
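rpmsg_send(), as used in the demo code above, is a thin wrapper over this raw call that fills in the channel's own source and destination addresses and blocks for a free buffer; a sketch of the relationship (argument validation omitted for brevity):

static inline int rpmsg_send(struct rpmsg_channel *rp_chnl, void *data, int len)
{
    /* Use the channel's own addresses and wait (poll) for a free buffer. */
    return rpmsg_send_offchannel_raw(rp_chnl, rp_chnl->src, rp_chnl->dst,
                                     (char *)data, len, RPMSG_TRUE);
}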