/** * @brief Modem and line status event init process * * This function creates one operations and uses that request of operation * for sending the status change event to peer. * * @param info Pointer to struct gb_uart_info. * @return 0 on success, error code on failure. */ static int uart_status_cb_init(struct gb_uart_info *info) { int ret; info->ms_ls_operation = gb_operation_create(info->cport, GB_UART_PROTOCOL_SERIAL_STATE, sizeof(struct gb_uart_serial_state_request)); if (!info->ms_ls_operation) { return -ENOMEM; } ret = sem_init(&info->status_sem, 0, 0); if (ret) { goto err_destroy_ms_ls_op; } ret = pthread_create(&info->status_thread, NULL, uart_status_thread, info); if (ret) { goto err_destroy_status_sem; } return 0; err_destroy_status_sem: sem_destroy(&info->status_sem); err_destroy_ms_ls_op: gb_operation_destroy(info->ms_ls_operation); return -ret; }
/**
 * @brief Allocate operations to serve as receive buffers
 *
 * Creates @a max_nodes operations, each carrying a receive-data request
 * large enough for @a buf_size bytes of payload, wraps every operation in
 * an op_node and places it on @a queue for later use as a receive buffer.
 *
 * @param max_nodes Maximum number of nodes to allocate.
 * @param buf_size Payload buffer size inside each operation.
 * @param queue Target queue receiving the nodes.
 * @return 0 for success, -errno for failures (partial allocations on
 *         @a queue are released before returning).
 */
static int uart_alloc_op(int max_nodes, int buf_size, sq_queue_t *queue)
{
    int i;

    for (i = 0; i < max_nodes; i++) {
        struct gb_operation *operation;
        struct gb_uart_receive_data_request *request;
        struct op_node *node;

        operation = gb_operation_create(info->cport,
                                        GB_UART_PROTOCOL_RECEIVE_DATA,
                                        sizeof(*request) + buf_size);
        if (operation == NULL) {
            goto err_free_op;
        }

        node = malloc(sizeof(*node));
        if (node == NULL) {
            gb_operation_destroy(operation);
            goto err_free_op;
        }

        request = gb_operation_get_request_payload(operation);

        /* The node keeps direct pointers into the request payload. */
        node->operation = operation;
        node->data_size = &request->size;
        node->buffer = request->data;

        put_node_back(queue, node);
    }

    return 0;

err_free_op:
    /* Release everything queued so far. */
    uart_free_op(queue);

    return -ENOMEM;
}
/** * @brief Data receiving process thread * * This function is the thread for processing data receiving tasks. When * it wake up, it checks the receiving queue for processing the come in data. * If protocol is running out of buffer, as soon as it gets a free buffer, * it passes to driver for continuing the receiving. * * @param data The regular thread data. * @return None. */ static void *uart_rx_thread(void *data) { struct gb_operation *operation = NULL; struct gb_uart_receive_data_request *request = NULL; struct buf_node *node = NULL; struct gb_bundle *bundle = data; struct gb_uart_info *info = bundle->priv; struct device *dev = bundle->dev; int ret; while (1) { sem_wait(&info->rx_sem); if (info->thread_stop) { break; } node = get_node_from(&info->data_queue); if (node) { operation = gb_operation_create(info->cport, GB_UART_PROTOCOL_RECEIVE_DATA, sizeof(*request) + node->data_size); if (!operation) { uart_report_error(GB_UART_EVENT_PROTOCOL_ERROR, __func__); } else { request = gb_operation_get_request_payload(operation); request->size = cpu_to_le16(node->data_size); request->flags = node->data_flags; memcpy(request->data, node->buffer, node->data_size); ret = gb_operation_send_request(operation, NULL, false); if (ret) { uart_report_error(GB_UART_EVENT_PROTOCOL_ERROR, __func__); } gb_operation_destroy(operation); } put_node_back(&info->free_queue, node); } /* * In case there is no free node in callback. */ if (info->require_node) { node = get_node_from(&info->free_queue); info->rx_node = node; ret = device_uart_start_receiver(dev, node->buffer, info->rx_buf_size, NULL, NULL, uart_rx_callback); if (ret) { uart_report_error(GB_UART_EVENT_DEVICE_ERROR, __func__); } info->require_node = 0; } } return NULL; }
/**
 * @brief Free operations
 *
 * Drains @a queue, destroying each node's operation and releasing the
 * node memory itself.
 *
 * @param queue Target queue.
 * @return None.
 */
static void uart_free_op(sq_queue_t *queue)
{
    struct op_node *node;

    while ((node = get_node_from(queue)) != NULL) {
        gb_operation_destroy(node->operation);
        free(node);
    }
}
/**
 * @brief Releases resources for the status change thread
 *
 * Terminates the thread started by uart_status_cb_init(), then tears down
 * the semaphore and the modem/line-status operation it allocated.
 *
 * @param info Pointer to struct gb_uart_info.
 * @return None.
 */
static void uart_status_cb_deinit(struct gb_uart_info *info)
{
    int thread_started = (info->status_thread != (pthread_t)0);

    if (thread_started) {
        /* Ask the thread to exit, wake it up, and wait for it. */
        info->thread_stop = 1;
        sem_post(&info->status_sem);
        pthread_join(info->status_thread, NULL);
    }

    sem_destroy(&info->status_sem);

    if (info->ms_ls_operation != NULL) {
        gb_operation_destroy(info->ms_ls_operation);
    }
}
/* * SVC Protocol Requests */ int gb_svc_protocol_version(void) { struct gb_operation *op_req; struct gb_operation *op_resp; struct gb_svc_protocol_version_request *version_request; struct gb_svc_protocol_version_response *version_response; op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_PROTOCOL_VERSION, sizeof(*version_request)); if (!op_req) { return -ENOMEM; } version_request = gb_operation_get_request_payload(op_req); version_request->major = GB_SVC_VERSION_MAJOR; version_request->minor = GB_SVC_VERSION_MINOR; gb_operation_send_request_sync(op_req); op_resp = gb_operation_get_response_op(op_req); if (!op_resp) { gb_operation_destroy(op_req); return GB_OP_PROTOCOL_BAD; } version_response = gb_operation_get_request_payload(op_resp); if (version_response->major > GB_SVC_VERSION_MAJOR) { dbg_error("unsupported major version: %u\n", version_response->major); gb_operation_destroy(op_req); return -EPROTO; } dbg_info("SVC Protocol version_major = %u version_minor = %u\n", version_response->major, version_response->minor); gb_operation_destroy(op_req); return 0; }
/**
 * @brief Notify the SVC that an interface was hot-unplugged
 *
 * Sends a GB_SVC_TYPE_INTF_HOT_UNPLUG request for @a intf_id and waits for
 * completion.
 *
 * @param intf_id Interface identifier being unplugged.
 * @return 0 on success, negative error code on failure.
 */
int gb_svc_intf_hot_unplug(uint32_t intf_id)
{
    struct gb_operation *op_req;
    struct gb_svc_intf_hot_unplug_request *req;
    int ret;

    op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_INTF_HOT_UNPLUG,
                                 sizeof(*req));
    if (!op_req) {
        return -ENOMEM;
    }

    req = gb_operation_get_request_payload(op_req);
    req->intf_id = intf_id;

    /*
     * Propagate the result of the synchronous send; the original always
     * returned 0 even when the send failed.
     */
    ret = gb_operation_send_request_sync(op_req);

    gb_operation_destroy(op_req);

    return ret;
}
/**
 * @brief Send the SVC hello request
 *
 * Announces the AP interface to the SVC with a GB_SVC_TYPE_HELLO request
 * carrying the endo id and @a ap_intf_id.
 *
 * @param ap_intf_id AP interface identifier.
 * @return 0 on success, negative error code on failure.
 */
int gb_svc_hello(uint8_t ap_intf_id)
{
    struct gb_operation *op_req;
    struct gb_svc_hello_request *req;
    int ret;

    op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_HELLO,
                                 sizeof(*req));
    if (!op_req) {
        /*
         * Allocation failure is -ENOMEM, matching every other request
         * helper in this file (the original returned -EPROTO here).
         */
        return -ENOMEM;
    }

    req = gb_operation_get_request_payload(op_req);
    req->endo_id = cpu_to_le16(GB_ENDO_ID);
    req->interface_id = ap_intf_id;

    /* Propagate the send result instead of unconditionally returning 0. */
    ret = gb_operation_send_request_sync(op_req);

    gb_operation_destroy(op_req);

    return ret;
}
static int gb_ptp_changed(enum ptp_change change) { struct gb_operation *operation; uint8_t type; int ret; /* Do not notify core until connected */ if (!ptp_info->connected) return 0; switch(change) { case POWER_PRESENT: type = GB_PTP_TYPE_EXT_POWER_CHANGED; break; case POWER_REQUIRED: type = GB_PTP_TYPE_POWER_REQUIRED_CHANGED; break; case POWER_AVAILABLE: if (!GB_PTP_SUPPORTS(ptp_info->host_major, ptp_info->host_minor, POWER_AVAILABLE_CHANGED)) return 0; type = GB_PTP_TYPE_POWER_AVAILABLE_CHANGED; break; default: return -EINVAL; } operation = gb_operation_create(ptp_info->cport, type, 0); if (!operation) { gb_error("%s(): failed to create operation\n", __func__); return -ENOMEM; } ret = gb_operation_send_request(operation, NULL, false); if (ret) gb_error("%s(): failed to send request\n", __func__); gb_operation_destroy(operation); return ret; }
/**
 * @brief Notify the SVC that an interface was hot-plugged
 *
 * Sends a GB_SVC_TYPE_INTF_HOTPLUG request for @a intf_id, carrying the
 * UniPro and Ara identification data for the new interface.
 *
 * @param intf_id Interface identifier being plugged.
 * @param unipro_mfg_id UniPro manufacturer id.
 * @param unipro_prod_id UniPro product id.
 * @param ara_vend_id Ara vendor id.
 * @param ara_prod_id Ara product id.
 * @return 0 on success, negative error code on failure.
 */
int gb_svc_intf_hotplug(uint32_t intf_id, uint32_t unipro_mfg_id,
                        uint32_t unipro_prod_id, uint32_t ara_vend_id,
                        uint32_t ara_prod_id)
{
    struct gb_operation *op_req;
    struct gb_svc_intf_hotplug_request *req;
    int ret;

    op_req = gb_operation_create(g_svc_cport, GB_SVC_TYPE_INTF_HOTPLUG,
                                 sizeof(*req));
    if (!op_req) {
        return -ENOMEM;
    }

    req = gb_operation_get_request_payload(op_req);
    req->intf_id = intf_id;
    req->data.unipro_mfg_id = unipro_mfg_id;
    req->data.unipro_prod_id = unipro_prod_id;
    req->data.ara_vend_id = ara_vend_id;
    req->data.ara_prod_id = ara_prod_id;

    /*
     * Propagate the result of the synchronous send; the original always
     * returned 0 even when the send failed.
     */
    ret = gb_operation_send_request_sync(op_req);

    gb_operation_destroy(op_req);

    return ret;
}