/** * @brief Request meta-data from camera module * * Allows the Camera to provide meta-data associated with a frame to the AP over * Greybus. * * @param operation pointer to structure of Greybus operation message * @return GB_OP_SUCCESS on success, error code on failure */ static uint8_t gb_camera_metadata(struct gb_operation *operation) { struct gb_camera_meta_data_request *request; struct metadata_info meta_data; int ret; lldbg("gb_camera_metadata() + \n"); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); meta_data.request_id = le32_to_cpu(request->request_id); meta_data.frame_number = le16_to_cpu(request->frame_number); meta_data.stream = request->stream; meta_data.padding = request->padding; meta_data.data = request->data; lldbg(" request_id = %d \n", request->request_id); lldbg(" frame_number = %d \n", request->frame_number); lldbg(" stream = %d \n", request->stream); ret = device_camera_trans_metadata(info->dev, &meta_data); if (ret) { return gb_errno_to_op_result(ret); } lldbg("gb_camera_metadata() - \n"); return GB_OP_SUCCESS; }
static uint8_t gb_svc_dme_peer_set(struct gb_operation *op) { struct gb_svc_dme_peer_set_request *req; struct gb_svc_dme_peer_set_response *resp; int rc; uint16_t attr, selector; uint32_t value; if (gb_operation_get_request_payload_size(op) < sizeof(*req)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } req = gb_operation_get_request_payload(op); resp = gb_operation_alloc_response(op, sizeof(*resp)); if (!resp) { return GB_OP_NO_MEMORY; } attr = le16_to_cpu(req->attr); selector = le16_to_cpu(req->selector); value = le32_to_cpu(req->value); rc = svc_dme_peer_set(req->intf_id, attr, selector, value, &resp->result_code); resp->result_code = cpu_to_le16(resp->result_code); return gb_errno_to_op_result(rc); }
static uint8_t gb_control_connected(struct gb_operation *operation) { int retval; struct gb_control_connected_request *request = gb_operation_get_request_payload(operation); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } retval = gb_listen(le16_to_cpu(request->cport_id)); if (retval) { gb_error("Can not connect cport %d: error %d\n", le16_to_cpu(request->cport_id), retval); return GB_OP_INVALID; } retval = gb_notify(le16_to_cpu(request->cport_id), GB_EVT_CONNECTED); if (retval) goto error_notify; return GB_OP_SUCCESS; error_notify: gb_stop_listening(le16_to_cpu(request->cport_id)); return gb_errno_to_op_result(retval); }
/**
 * @brief Handle a Greybus control "disconnected" request.
 *
 * Notifies the driver of the disconnect event, resets the CPort (on TSB
 * chips only), and stops listening on it. The notify step is best-effort:
 * its failure is logged but does not abort the teardown.
 *
 * NOTE(review): this reuses struct gb_control_connected_request for the
 * disconnected message; presumably both requests share the same wire
 * layout — confirm against the protocol definition.
 *
 * @param operation pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_control_disconnected(struct gb_operation *operation)
{
    int retval;
    struct gb_control_connected_request *request =
        gb_operation_get_request_payload(operation);

    /* Validate the payload size before dereferencing the request. */
    if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    retval = gb_notify(le16_to_cpu(request->cport_id), GB_EVT_DISCONNECTED);
    if (retval) {
        gb_error("Cannot notify GB driver of disconnect event.\n");
        /*
         * don't return, we still want to reset the cport and stop listening
         * on the CPort.
         */
    }

#ifdef CONFIG_ARCH_CHIP_TSB
    /* Hardware-specific: flush any in-flight traffic on this CPort. */
    unipro_reset_cport(le16_to_cpu(request->cport_id), NULL, NULL);
#endif

    retval = gb_stop_listening(le16_to_cpu(request->cport_id));
    if (retval) {
        gb_error("Can not disconnect cport %d: error %d\n",
                 le16_to_cpu(request->cport_id), retval);
        return GB_OP_INVALID;
    }

    return GB_OP_SUCCESS;
}
static uint8_t gb_ptp_set_current_flow(struct gb_operation *operation) { struct gb_ptp_set_current_flow_request *request; int ret; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("%s(): dropping short message\n", __func__); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); #ifdef CONFIG_GREYBUS_PTP_INT_RCV_NEVER if (request->direction == PTP_CURRENT_TO_MOD) return GB_OP_INVALID; #endif #if defined(CONFIG_GREYBUS_PTP_INT_SND_NEVER) && defined(CONFIG_GREYBUS_PTP_EXT_NONE) if (request->direction == PTP_CURRENT_FROM_MOD) return GB_OP_INVALID; #endif ret = device_ptp_set_current_flow(ptp_info->dev, request->direction); if (ret) return GB_OP_UNKNOWN_ERROR; return GB_OP_SUCCESS; }
/**
 * @brief Modem and line status process thread
 *
 * This function is the thread for processing modem and line status change. It
 * uses the operation to send the event to the peer. It only sends the required
 * status for protocol, not the all status in UART.
 *
 * @param data The regular thread data.
 * @return None.
 */
static void *uart_status_thread(void *data)
{
    uint16_t updated_status = 0;
    struct gb_uart_serial_state_request *request;
    struct gb_uart_info *info = data;
    int ret = 0;

    while (1) {
        /* Block until the interrupt/callback side signals a status change. */
        sem_wait(&info->status_sem);

        /* Shutdown request from the protocol teardown path. */
        if (info->thread_stop) {
            break;
        }

        updated_status = parse_ms_ls_registers(info->updated_ms,
                                               info->updated_ls);
        /*
         * Only send the status bits which protocol need to know to peer
         */
        if (info->last_serial_state ^ updated_status) {
            /* Record the new state first so a send failure is not retried. */
            info->last_serial_state = updated_status;
            request =
                gb_operation_get_request_payload(info->ms_ls_operation);
            /*
             * NOTE(review): updated_status is stored without an explicit
             * cpu_to_le16() — presumably `control` is a single byte or the
             * conversion happens elsewhere; confirm against the protocol
             * struct definition.
             */
            request->control = updated_status;
            /* Fire-and-forget: no response callback, no allocation. */
            ret = gb_operation_send_request(info->ms_ls_operation, NULL,
                                            false);
            if (ret) {
                uart_report_error(GB_UART_EVENT_PROTOCOL_ERROR, __func__);
            }
        }
    }

    return NULL;
}
/** * @brief Protocol send data function. * * Requests that the UART device begin transmitting characters. One or more * bytes to be transmitted will be supplied. * * @param operation The pointer to structure of gb_operation. * @return GB_OP_SUCCESS on success, error code on failure. */ static uint8_t gb_uart_send_data(struct gb_operation *operation) { int ret, size; int sent = 0; size_t request_size = gb_operation_get_request_payload_size(operation); struct gb_uart_send_data_request *request = gb_operation_get_request_payload(operation); struct gb_bundle *bundle; if (request_size < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } size = le16_to_cpu(request->size); if (request_size < sizeof(*request) + size) { gb_error("dropping short message\n"); return GB_OP_INVALID; } bundle = gb_operation_get_bundle(operation); DEBUGASSERT(bundle); ret = device_uart_start_transmitter(bundle->dev, request->data, size, NULL, &sent, NULL); if (ret) { return GB_OP_UNKNOWN_ERROR; } return GB_OP_SUCCESS; }
/**
 * @brief Allocate operations for receiver buffers
 *
 * Pre-allocates max_nodes Greybus receive-data operations, wraps each one in
 * an op_node bookkeeping record, and queues them all on the target queue.
 * On any failure everything already queued is released.
 *
 * @param max_nodes Maximum nodes.
 * @param buf_size Buffer size in operation.
 * @param queue Target queue.
 * @return 0 for success, -errno for failures.
 */
static int uart_alloc_op(int max_nodes, int buf_size, sq_queue_t *queue)
{
    struct gb_uart_receive_data_request *request;
    struct gb_operation *operation;
    struct op_node *node;
    int i;

    for (i = 0; i < max_nodes; i++) {
        operation = gb_operation_create(info->cport,
                                        GB_UART_PROTOCOL_RECEIVE_DATA,
                                        sizeof(*request) + buf_size);
        if (operation == NULL) {
            goto err_free_op;
        }

        node = malloc(sizeof(*node));
        if (node == NULL) {
            /* The orphan operation is not on the queue yet: free it here. */
            gb_operation_destroy(operation);
            goto err_free_op;
        }

        /* Point the node into the operation's own request payload. */
        node->operation = operation;
        request = gb_operation_get_request_payload(operation);
        node->data_size = &request->size;
        node->buffer = request->data;

        put_node_back(queue, node);
    }

    return 0;

err_free_op:
    /* Releases every node (and its operation) queued so far. */
    uart_free_op(queue);
    return -ENOMEM;
}
/** * @brief Engage camera capture operation * * It tell camera module to start capture. * * @param operation pointer to structure of Greybus operation message * @return GB_OP_SUCCESS on success, error code on failure */ static uint8_t gb_camera_capture(struct gb_operation *operation) { struct gb_camera_capture_request *request; struct capture_info *capt_req; size_t request_size; int ret; lldbg("gb_camera_capture() + \n"); if (info->state != STATE_CONFIGURED && info->state != STATE_STREAMING) { return GB_OP_INVALID; } request_size = gb_operation_get_request_payload_size(operation); if (request_size < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); if (request->padding != 0) { gb_error("invalid padding value\n"); return GB_OP_INVALID; } capt_req = malloc(sizeof(*capt_req)); if(!capt_req) { return GB_OP_NO_MEMORY; } capt_req->request_id = le32_to_cpu(request->request_id); capt_req->streams = request->streams; capt_req->num_frames = le32_to_cpu(request->num_frames); capt_req->settings = request->settings; capt_req->settings_size = request_size - sizeof(*request); lldbg(" request_id = %d \n", capt_req->request_id); lldbg(" streams = %d \n", capt_req->streams); lldbg(" num_frames = %d \n", capt_req->num_frames); lldbg(" settings_size = %u\n", capt_req->settings_size); ret = device_camera_capture(info->dev, capt_req); if (ret) { gb_error("error in camera capture thread. \n"); ret = gb_errno_to_op_result(ret); goto err_free_mem; } free(capt_req); lldbg("gb_camera_capture() - \n"); return GB_OP_SUCCESS; err_free_mem: free(capt_req); return ret; }
/**
 * @brief Data receiving process thread
 *
 * This function is the thread for processing data receiving tasks. When
 * it wake up, it checks the receiving queue for processing the come in data.
 * If protocol is running out of buffer, as soon as it gets a free buffer,
 * it passes to driver for continuing the receiving.
 *
 * @param data The regular thread data.
 * @return None.
 */
static void *uart_rx_thread(void *data)
{
    struct gb_operation *operation = NULL;
    struct gb_uart_receive_data_request *request = NULL;
    struct buf_node *node = NULL;
    struct gb_bundle *bundle = data;
    struct gb_uart_info *info = bundle->priv;
    struct device *dev = bundle->dev;
    int ret;

    while (1) {
        /* Block until the rx callback signals new data or shutdown. */
        sem_wait(&info->rx_sem);

        if (info->thread_stop) {
            break;
        }

        /* Drain one filled buffer, forward its contents to the peer. */
        node = get_node_from(&info->data_queue);
        if (node) {
            operation = gb_operation_create(info->cport,
                                            GB_UART_PROTOCOL_RECEIVE_DATA,
                                            sizeof(*request) +
                                            node->data_size);
            if (!operation) {
                uart_report_error(GB_UART_EVENT_PROTOCOL_ERROR, __func__);
            } else {
                request = gb_operation_get_request_payload(operation);
                request->size = cpu_to_le16(node->data_size);
                request->flags = node->data_flags;
                memcpy(request->data, node->buffer, node->data_size);
                /* Fire-and-forget send; the operation is destroyed after. */
                ret = gb_operation_send_request(operation, NULL, false);
                if (ret) {
                    uart_report_error(GB_UART_EVENT_PROTOCOL_ERROR,
                                      __func__);
                }
                gb_operation_destroy(operation);
            }
            /* Recycle the buffer whether or not the send succeeded. */
            put_node_back(&info->free_queue, node);
        }

        /*
         * In case there is no free node in callback.
         */
        if (info->require_node) {
            /*
             * NOTE(review): get_node_from() may return NULL if the free
             * queue is empty, and node->buffer below would then be a NULL
             * dereference. Presumably the put_node_back() above guarantees
             * at least one free node at this point — confirm.
             */
            node = get_node_from(&info->free_queue);
            info->rx_node = node;
            ret = device_uart_start_receiver(dev, node->buffer,
                                             info->rx_buf_size, NULL, NULL,
                                             uart_rx_callback);
            if (ret) {
                uart_report_error(GB_UART_EVENT_DEVICE_ERROR, __func__);
            }
            info->require_node = 0;
        }
    }

    return NULL;
}
/**
 * @brief Handle an SVC route destroy request.
 *
 * Tears down the routing entry between the two interfaces named in the
 * request.
 *
 * @param op pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_svc_route_destroy(struct gb_operation *op)
{
    struct gb_svc_route_destroy_request *req;

    if (gb_operation_get_request_payload_size(op) < sizeof(*req)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    req = gb_operation_get_request_payload(op);

    return gb_errno_to_op_result(svc_route_destroy(req->intf1_id,
                                                   req->intf2_id));
}
/* * SVC Protocol Requests */ int gb_svc_protocol_version(void) { struct gb_operation *op_req; struct gb_operation *op_resp; struct gb_svc_protocol_version_request *version_request; struct gb_svc_protocol_version_response *version_response; op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_PROTOCOL_VERSION, sizeof(*version_request)); if (!op_req) { return -ENOMEM; } version_request = gb_operation_get_request_payload(op_req); version_request->major = GB_SVC_VERSION_MAJOR; version_request->minor = GB_SVC_VERSION_MINOR; gb_operation_send_request_sync(op_req); op_resp = gb_operation_get_response_op(op_req); if (!op_resp) { gb_operation_destroy(op_req); return GB_OP_PROTOCOL_BAD; } version_response = gb_operation_get_request_payload(op_resp); if (version_response->major > GB_SVC_VERSION_MAJOR) { dbg_error("unsupported major version: %u\n", version_response->major); gb_operation_destroy(op_req); return -EPROTO; } dbg_info("SVC Protocol version_major = %u version_minor = %u\n", version_response->major, version_response->minor); gb_operation_destroy(op_req); return 0; }
/**
 * @brief Handle an SVC connection destroy request.
 *
 * Destroys the connection between the two (interface, cport) endpoints
 * named in the request; cport ids are little-endian on the wire.
 *
 * @param op pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_svc_connection_destroy(struct gb_operation *op)
{
    struct gb_svc_conn_destroy_request *req;
    int rc;

    if (gb_operation_get_request_payload_size(op) < sizeof(*req)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    req = gb_operation_get_request_payload(op);
    rc = svc_connection_destroy(req->intf1_id, le16_to_cpu(req->cport1_id),
                                req->intf2_id, le16_to_cpu(req->cport2_id));

    return gb_errno_to_op_result(rc);
}
/**
 * @brief Handle an SVC connection create request.
 *
 * Creates a connection between the two (interface, cport) endpoints named
 * in the request, with the given traffic class and flags; cport ids are
 * little-endian on the wire.
 *
 * @param op pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_svc_connection_create(struct gb_operation *op)
{
    struct gb_svc_conn_create_request *req;
    int retval;

    if (gb_operation_get_request_payload_size(op) < sizeof(*req)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    req = gb_operation_get_request_payload(op);
    retval = svc_connection_create(req->intf1_id, le16_to_cpu(req->cport1_id),
                                   req->intf2_id, le16_to_cpu(req->cport2_id),
                                   req->tc, req->flags);

    return gb_errno_to_op_result(retval);
}
int gb_svc_intf_hot_unplug(uint32_t intf_id) { struct gb_operation *op_req; struct gb_svc_intf_hot_unplug_request *req; op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_INTF_HOT_UNPLUG, sizeof(*req)); if (!op_req) { return -ENOMEM; } req = gb_operation_get_request_payload(op_req); req->intf_id = intf_id; gb_operation_send_request_sync(op_req); gb_operation_destroy(op_req); return 0; }
/** * @brief Protocol send break function. * * Requests that the UART generate a break condition on its transmit line. * * @param operation The pointer to structure of gb_operation. * @return GB_OP_SUCCESS on success, error code on failure. */ static uint8_t gb_uart_send_break(struct gb_operation *operation) { int ret; struct gb_uart_set_break_request *request = gb_operation_get_request_payload(operation); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } ret = device_uart_set_break(info->dev, request->state); if (ret) { return GB_OP_UNKNOWN_ERROR; } return GB_OP_SUCCESS; }
/*
 * SVC Protocol Request handlers
 */
/**
 * @brief Handle an SVC interface device-id assignment request.
 *
 * Associates the device id carried in the request with the given interface.
 *
 * @param op pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_svc_intf_device_id(struct gb_operation *op)
{
    struct gb_svc_intf_device_id_request *req;
    int rc;

    if (gb_operation_get_request_payload_size(op) < sizeof(*req)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    req = gb_operation_get_request_payload(op);
    rc = svc_intf_device_id(req->intf_id, req->device_id);

    return gb_errno_to_op_result(rc);
}
static uint8_t __attribute__((unused)) gb_control_intf_pwr_set(struct gb_operation *operation) { struct gb_control_intf_pwr_set_request *request; struct gb_control_intf_pwr_set_response *response; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } response = gb_operation_alloc_response(operation, sizeof(*response)); if (!response) return GB_OP_NO_MEMORY; request = gb_operation_get_request_payload(operation); (void)request; return GB_OP_PROTOCOL_BAD; }
static uint8_t gb_vibrator_vibrator_on(struct gb_operation *operation) { struct gb_vibrator_on_request *request = gb_operation_get_request_payload(operation); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } gpio_activate(GB_VIBRATOR_DUMMY_GPIO); gpio_set_value(GB_VIBRATOR_DUMMY_GPIO, 1); usleep(le16_to_cpu(request->timeout_ms)); gpio_deactivate(GB_VIBRATOR_DUMMY_GPIO); return GB_OP_SUCCESS; }
int gb_svc_hello(uint8_t ap_intf_id) { struct gb_operation *op_req; struct gb_svc_hello_request *req; op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_HELLO, sizeof(*req)); if (!op_req) { return -EPROTO; } req = gb_operation_get_request_payload(op_req); req->endo_id = cpu_to_le16(GB_ENDO_ID); req->interface_id = ap_intf_id; gb_operation_send_request_sync(op_req); gb_operation_destroy(op_req); return 0; }
/** * @brief Protocol set RTS & DTR line status function. * * Controls RTS and DTR line states of the UART. * * @param operation The pointer to structure of gb_operation. * @return GB_OP_SUCCESS on success, error code on failure. */ static uint8_t gb_uart_set_control_line_state(struct gb_operation *operation) { int ret; uint8_t modem_ctrl = 0; uint16_t control; struct gb_uart_set_control_line_state_request *request = gb_operation_get_request_payload(operation); struct gb_bundle *bundle; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } bundle = gb_operation_get_bundle(operation); DEBUGASSERT(bundle); ret = device_uart_get_modem_ctrl(bundle->dev, &modem_ctrl); if (ret) { return GB_OP_UNKNOWN_ERROR; } control = le16_to_cpu(request->control); if (control & GB_UART_CTRL_DTR) { modem_ctrl |= MCR_DTR; } else { modem_ctrl &= ~MCR_DTR; } if (control & GB_UART_CTRL_RTS) { modem_ctrl |= MCR_RTS; } else { modem_ctrl &= ~MCR_RTS; } ret = device_uart_set_modem_ctrl(bundle->dev, &modem_ctrl); if (ret) { return GB_OP_UNKNOWN_ERROR; } return GB_OP_SUCCESS; }
static uint8_t gb_control_timesync_authoritative(struct gb_operation *operation) { uint64_t frame_time[GB_TIMESYNC_MAX_STROBES]; struct gb_control_timesync_authoritative_request *request; int i; int retval; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++) frame_time[i] = le64_to_cpu(request->frame_time[i]); retval = timesync_authoritative(frame_time); return gb_errno_to_op_result(retval); }
static uint8_t gb_control_connected(struct gb_operation *operation) { int retval; struct gb_control_connected_request *request = gb_operation_get_request_payload(operation); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } retval = gb_listen(le16_to_cpu(request->cport_id)); if (retval) { gb_error("Can not connect cport %d: error %d\n", le16_to_cpu(request->cport_id), retval); return GB_OP_INVALID; } return GB_OP_SUCCESS; }
/** * @brief performs the desired reboot type specified in the mode field * of the request. */ static uint8_t gb_control_reboot(struct gb_operation *operation) { struct gb_control_reboot_request *request = gb_operation_get_request_payload(operation); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } switch (request->mode) { case GB_CONTROL_REBOOT_MODE_BOOTLOADER: return gb_control_reboot_flash(operation); case GB_CONTROL_REBOOT_MODE_RESET: return gb_control_reboot_reset(operation); } gb_error("unsupported reboot mode\n"); return GB_OP_INVALID; }
/**
 * @brief Handle a control timesync-enable request.
 *
 * Decodes the strobe count, initial frame time, strobe delay and reference
 * clock rate from the wire and enables timesync with them.
 *
 * @param operation pointer to the Greybus operation
 * @return GB_OP_SUCCESS on success, error code on failure
 */
static uint8_t gb_control_timesync_enable(struct gb_operation *operation)
{
    struct gb_control_timesync_enable_request *request;
    uint64_t frame_time;
    uint32_t strobe_delay;
    uint32_t refclk;
    uint8_t count;
    int rc;

    if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) {
        gb_error("dropping short message\n");
        return GB_OP_INVALID;
    }

    request = gb_operation_get_request_payload(operation);
    count = request->count;
    frame_time = le64_to_cpu(request->frame_time);
    strobe_delay = le32_to_cpu(request->strobe_delay);
    refclk = le32_to_cpu(request->refclk);

    rc = timesync_enable(count, frame_time, strobe_delay, refclk);

    return gb_errno_to_op_result(rc);
}
static uint8_t gb_ptp_set_max_input_current(struct gb_operation *operation) { #ifndef CONFIG_GREYBUS_PTP_INT_RCV_NEVER struct gb_ptp_set_max_input_current_request *request; uint32_t current; int ret; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("%s(): dropping short message\n", __func__); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); current = le32_to_cpu(request->current); ret = device_ptp_set_max_input_current(ptp_info->dev, current); if (ret) return GB_OP_UNKNOWN_ERROR; return GB_OP_SUCCESS; #else return GB_OP_INVALID; #endif }
int gb_svc_intf_hotplug(uint32_t intf_id, uint32_t unipro_mfg_id, uint32_t unipro_prod_id, uint32_t ara_vend_id, uint32_t ara_prod_id) { struct gb_operation *op_req; struct gb_svc_intf_hotplug_request *req; op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_INTF_HOTPLUG, sizeof(*req)); if (!op_req) { return -ENOMEM; } req = gb_operation_get_request_payload(op_req); req->intf_id = intf_id; req->data.unipro_mfg_id = unipro_mfg_id; req->data.unipro_prod_id = unipro_prod_id; req->data.ara_vend_id = ara_vend_id; req->data.ara_prod_id = ara_prod_id; gb_operation_send_request_sync(op_req); gb_operation_destroy(op_req); return 0; }
static uint8_t gb_ptp_protocol_version(struct gb_operation *operation) { struct gb_ptp_proto_version_request *request; struct gb_ptp_proto_version_response *response; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); ptp_info->host_major = request->major; ptp_info->host_minor = request->minor; response = gb_operation_alloc_response(operation, sizeof(*response)); if (!response) { return GB_OP_NO_MEMORY; } response->major = GB_PTP_VERSION_MAJOR; response->minor = GB_PTP_VERSION_MINOR; return GB_OP_SUCCESS; }
/** * @brief Protocol set line coding function. * * Sets the line settings of the UART to the specified baud rate, format, * parity, and data bits. * * @param operation The pointer to structure of gb_operation. * @return GB_OP_SUCCESS on success, error code on failure. */ static uint8_t gb_uart_set_line_coding(struct gb_operation *operation) { int ret; uint32_t baud; enum uart_parity parity; enum uart_stopbit stopbit; uint8_t databits; struct gb_serial_line_coding_request *request = gb_operation_get_request_payload(operation); struct gb_bundle *bundle; if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message\n"); return GB_OP_INVALID; } bundle = gb_operation_get_bundle(operation); DEBUGASSERT(bundle); baud = le32_to_cpu(request->rate); switch (request->format) { case GB_SERIAL_1_STOP_BITS: stopbit = ONE_STOP_BIT; break; case GB_SERIAL_1_5_STOP_BITS: stopbit = ONE5_STOP_BITS; break; case GB_SERIAL_2_STOP_BITS: stopbit = TWO_STOP_BITS; break; default: return GB_OP_INVALID; break; } switch (request->parity) { case GB_SERIAL_NO_PARITY: parity = NO_PARITY; break; case GB_SERIAL_ODD_PARITY: parity = ODD_PARITY; break; case GB_SERIAL_EVEN_PARITY: parity = EVEN_PARITY; break; case GB_SERIAL_MARK_PARITY: parity = MARK_PARITY; break; case GB_SERIAL_SPACE_PARITY: parity = SPACE_PARITY; break; default: return GB_OP_INVALID; break; } if (request->data > 8 || request->data < 5) { return GB_OP_INVALID; } databits = request->data; ret = device_uart_set_configuration(bundle->dev, baud, parity, databits, stopbit, 1); /* 1 for auto flow control enable */ if (ret) { return GB_OP_UNKNOWN_ERROR; } return GB_OP_SUCCESS; }
/** * @brief Configure camera module streams * * The Configure Streams operation configures or unconfigures the Camera Module * to prepare or stop video capture. * * @param operation pointer to structure of Greybus operation message * @return GB_OP_SUCCESS on success, error code on failure */ static uint8_t gb_camera_configure_streams(struct gb_operation *operation) { struct gb_camera_configure_streams_request *request; struct gb_camera_configure_streams_response *response; struct gb_stream_config_req *cfg_set_req; struct gb_stream_config_resp *cfg_ans_resp; struct streams_cfg_req *cfg_request; struct streams_cfg_ans *cfg_answer; uint8_t num_streams; uint8_t res_flags = 0; int i, ret; lldbg("gb_camera_configure_streams() + \n"); if (gb_operation_get_request_payload_size(operation) < sizeof(*request)) { gb_error("dropping short message \n"); return GB_OP_INVALID; } request = gb_operation_get_request_payload(operation); num_streams = request->num_streams; lldbg("num_streams = %d \n", num_streams); lldbg("req flags = %d \n", request->flags); if (num_streams > MAX_STREAMS_NUM) return GB_OP_INVALID; /* Check if the request is acceptable in the current state. */ if (num_streams == 0) { if (info->state < STATE_UNCONFIGURED || info->state > STATE_CONFIGURED) return GB_OP_INVALID; } else { if (info->state != STATE_UNCONFIGURED) return GB_OP_INVALID; } /* * Zero streams unconfigures the camera, move to the unconfigured state and * power it down. */ if (num_streams == 0) { info->state = STATE_UNCONFIGURED; ret = device_camera_power_down(info->dev); if (ret) return gb_errno_to_op_result(ret); response = gb_operation_alloc_response(operation, sizeof(*response)); return GB_OP_SUCCESS; } /* Otherwise pass stream configuration to the camera module. 
*/ cfg_set_req = request->config; cfg_request = malloc(num_streams * sizeof(*cfg_request)); if (!cfg_request) return GB_OP_NO_MEMORY; /* convert data for driver */ for (i = 0; i < num_streams; i++) { lldbg(" stream #%d\n", i); cfg_request[i].width = le16_to_cpu(cfg_set_req[i].width); cfg_request[i].height = le16_to_cpu(cfg_set_req[i].height); cfg_request[i].format = le16_to_cpu(cfg_set_req[i].format); cfg_request[i].padding = le16_to_cpu(cfg_set_req[i].padding); lldbg(" width = %d \n", cfg_request[i].width); lldbg(" height = %d \n", cfg_request[i].height); lldbg(" format = %d \n", cfg_request[i].format); lldbg(" padding = %d \n", cfg_request[i].padding); } /* alloc for getting answer from driver */ cfg_answer = malloc(MAX_STREAMS_NUM * sizeof(*cfg_answer)); if (!cfg_answer) { ret = GB_OP_NO_MEMORY; goto err_free_req_mem; } /* driver shall check the num_streams, it can't exceed its capability */ ret = device_camera_set_streams_cfg(info->dev, &num_streams, request->flags, cfg_request, &res_flags, cfg_answer); if (ret) { /* FIXME: * add greybus protocol error for EIO operations. * For now, return OP_INVALID */ lldbg("Camera module reported error in configure stream %d\n", ret); ret = GB_OP_INVALID; goto err_free_ans_mem; } /* * If the requested format is not supported keep camera in un-configured * state; * Stay un-configured anyhow if AP is just testing format; * Move to configured otherwise */ if (res_flags & CAMERA_CONF_STREAMS_ADJUSTED) info->state = STATE_UNCONFIGURED; else if (request->flags & CAMERA_CONF_STREAMS_TEST_ONLY) info->state = STATE_UNCONFIGURED; else info->state = STATE_CONFIGURED; /* Create and fill the greybus response. 
*/ lldbg("Resp: \n"); response = gb_operation_alloc_response(operation, sizeof(*response) + num_streams * sizeof(*cfg_ans_resp)); response->num_streams = num_streams; response->flags = res_flags; response->padding[0] = 0; response->padding[1] = 0; lldbg("flags = 0x%2x: \n", response->flags); for (i = 0; i < num_streams; i++) { cfg_ans_resp = &response->config[i]; lldbg("\n"); lldbg(" width = %d \n", cfg_answer[i].width); lldbg(" height = %d \n", cfg_answer[i].height); lldbg(" format = %d \n", cfg_answer[i].format); lldbg(" virtual_channel = %d \n", cfg_answer[i].virtual_channel); lldbg(" data_type = %d \n", cfg_answer[i].data_type); lldbg(" max_size = %d \n", cfg_answer[i].max_size); cfg_ans_resp->width = cpu_to_le16(cfg_answer[i].width); cfg_ans_resp->height = cpu_to_le16(cfg_answer[i].height); cfg_ans_resp->format = cpu_to_le16(cfg_answer[i].format); cfg_ans_resp->virtual_channel = cfg_answer[i].virtual_channel; /* * FIXME * The API towards the camera driver supports a single data type * for now, always return NOT_USED for the second data type */ cfg_ans_resp->data_type[0] = cfg_answer[i].data_type; cfg_ans_resp->data_type[1] = GB_CAM_DT_NOT_USED; cfg_ans_resp->padding[0] = 0; cfg_ans_resp->padding[1] = 0; cfg_ans_resp->padding[2] = 0; cfg_ans_resp->max_size = cpu_to_le32(cfg_answer[i].max_size); } ret = GB_OP_SUCCESS; err_free_ans_mem: free(cfg_answer); err_free_req_mem: free(cfg_request); lldbg("gb_camera_configure_streams() %d - \n", ret); return ret; }