// state machines static void vsfusbd_HID_INREPORT_callback(void *param) { struct vsfusbd_HID_param_t *HID_param = (struct vsfusbd_HID_param_t *)param; vsfsm_post_evt(&HID_param->iface->sm, VSFUSBD_HID_EVT_INREPORT); }
/* Mark an IN report as changed so it will be (re)sent to the host.
 * If no IN transfer is currently in flight (!param->busy), kick the
 * interface state machine immediately with VSFUSBD_HID_EVT_INREPORT;
 * otherwise the completion callback will pick the change up.
 * Always returns VSFERR_NONE. */
vsf_err_t vsfusbd_HID_IN_report_changed(struct vsfusbd_HID_param_t *param,
					struct vsfusbd_HID_report_t *report)
{
	report->changed = true;
	if (!param->busy)
	{
		// was a mis-encoded "&param" (pilcrow mojibake) which broke the
		// build: post to the interface state machine by address
		vsfsm_post_evt(&param->iface->sm, VSFUSBD_HID_EVT_INREPORT);
	}
	return VSFERR_NONE;
}
/* IN-report transfer complete: clear the busy flag, notify the user's
 * on_report_out hook (if installed), then post VSFUSBD_HID_EVT_INREPORT
 * so the state machine can queue the next pending report.
 * param is an opaque pointer to a struct vsfusbd_HID_param_t. */
static void vsfusbd_HID_INREPORT_callback(void *param)
{
	struct vsfusbd_HID_param_t *hid = (struct vsfusbd_HID_param_t *)param;

	hid->busy = false;
	if (hid->on_report_out != NULL)
	{
		hid->on_report_out(hid);
	}
	vsfsm_post_evt(&hid->iface->sm, VSFUSBD_HID_EVT_INREPORT);
}
/* State-machine event handler for the CDC data interface.
 * Wires the TX/RX streams to the USB endpoints on INIT, installs endpoint
 * handlers once the streams connect, and gates IN/OUT traffic with the
 * in_enable/out_enable flags. Always returns NULL (no state transition). */
static struct vsfsm_state_t *
vsfusbd_CDCData_evt_handler(struct vsfsm_t *sm, vsfsm_evt_t evt)
{
	struct vsfusbd_CDC_param_t *cdc = (struct vsfusbd_CDC_param_t *)sm->user_data;
	struct vsfusbd_device_t *dev = cdc->device;

	switch (evt)
	{
	case VSFSM_EVT_INIT:
		// hook both stream directions back to this CDC instance
		cdc->stream_tx->callback_rx.param = cdc;
		cdc->stream_tx->callback_rx.on_inout = vsfusbd_CDCData_streamtx_on_in;
		cdc->stream_tx->callback_rx.on_connect = vsfusbd_CDCData_streamtx_on_txconn;
		cdc->stream_rx->callback_tx.param = cdc;
		cdc->stream_rx->callback_tx.on_inout = vsfusbd_CDCData_streamrx_on_out;
		cdc->stream_rx->callback_tx.on_connect = vsfusbd_CDCData_streamrx_on_rxconn;
		// traffic stays disabled until the streams connect
		cdc->in_enable = false;
		cdc->out_enable = false;
		break;
	case VSFUSBD_CDC_EVT_STREAMTX_ONCONN:
		// note: "hanlder" is the (misspelled) external symbol name
		vsfusbd_set_IN_handler(dev, cdc->ep_in, vsfusbd_CDCData_IN_hanlder);
		break;
	case VSFUSBD_CDC_EVT_STREAMRX_ONCONN:
		vsfusbd_set_OUT_handler(dev, cdc->ep_out, vsfusbd_CDCData_OUT_hanlder);
		// poke ourselves so the OUT endpoint is enabled if room is available
		vsfsm_post_evt(sm, VSFUSBD_CDC_EVT_STREAMRX_ONOUT);
		break;
	case VSFUSBD_CDC_EVT_STREAMTX_ONIN:
		if (!cdc->in_enable)
		{
			cdc->in_enable = true;
			// prime the first IN transfer by hand
			vsfusbd_CDCData_IN_hanlder(cdc->device, cdc->ep_in);
		}
		break;
	case VSFUSBD_CDC_EVT_STREAMRX_ONOUT:
		// only open the OUT endpoint when the RX stream can absorb at
		// least one full endpoint-sized packet
		if (!cdc->out_enable &&
			(stream_get_free_size(cdc->stream_rx) >=
				dev->drv->ep.get_OUT_epsize(cdc->ep_out)))
		{
			cdc->out_enable = true;
			dev->drv->ep.enable_OUT(cdc->ep_out);
		}
		break;
	}
	return NULL;
}
/* Initialize a state machine: reset the event counter, point cur_state at
 * the initial state (when SM/HSM support is compiled in), run the initial
 * state's ENTER action synchronously, mark the machine active, and finally
 * post VSFSM_EVT_INIT so the first real transition runs through the event
 * queue. Returns the result of posting VSFSM_EVT_INIT. */
vsf_err_t vsfsm_init(struct vsfsm_t *sm)
{
	sm->evt_count = 0;
#if VSFSM_CFG_SYNC_EN
	// not queued behind any sync object yet
	sm->pending_next = NULL;
#endif
#if VSFSM_CFG_SM_EN || VSFSM_CFG_HSM_EN
	sm->cur_state = &sm->init_state;
#endif
	// ignore any state transition on VSFSM_EVT_ENTER
	sm->init_state.evt_handler(sm, VSFSM_EVT_ENTER);
#if VSFSM_CFG_ACTIVE_EN
	// set active so that sm can accept events
	vsfsm_set_active(sm, true);
#endif
	// process state transition on VSFSM_EVT_INIT
	return vsfsm_post_evt(sm, VSFSM_EVT_INIT);
}
vsf_err_t vsfsm_sync_increase(struct vsfsm_sync_t *sync) { struct vsfsm_t *sm; if (sync->sm_pending) { sm = sync->sm_pending; sync->sm_pending = sync->sm_pending->pending_next; if (vsfsm_post_evt(sm, sync->evt)) { // should increase the evtq buffer size return VSFERR_BUG; } } else if (sync->cur_value < sync->max_value) { sync->cur_value++; } else { return VSFERR_BUG; } return VSFERR_NONE; }
/* Dispatch one event to a state machine and perform any state transition
 * the handler requests.
 *
 * Handler return protocol (SM/HSM builds): NULL = event handled in place;
 * (struct vsfsm_state_t *)-1 = not handled, pass to the superstate (HSM
 * builds only); any other pointer = transition target.
 *
 * Returns VSFERR_NONE when no transition is needed, VSFERR_BUG when no
 * least-common-ancestor can be found, otherwise the result of posting
 * VSFSM_EVT_INIT to the new state. */
static vsf_err_t vsfsm_dispatch_evt(struct vsfsm_t *sm, vsfsm_evt_t evt)
{
#if VSFSM_CFG_SM_EN && VSFSM_CFG_HSM_EN
	struct vsfsm_state_t *temp_state = NULL, *lca_state;
	struct vsfsm_state_t *temp_processor_state, *temp_target_state;
	struct vsfsm_state_t *processor_state = sm->cur_state;
	struct vsfsm_state_t *target_state = processor_state->evt_handler(sm, evt);
#elif VSFSM_CFG_SM_EN
	struct vsfsm_state_t *target_state = sm->cur_state->evt_handler(sm, evt);
#else
	sm->init_state.evt_handler(sm, evt);
#endif

#if !VSFSM_CFG_SM_EN
	return VSFERR_NONE;
#else
	// local event can not transmit or be passed to superstate
	if (evt >= VSFSM_EVT_LOCAL)
	{
		return VSFERR_NONE;
	}

#if VSFSM_CFG_HSM_EN
	// superstate: while the current handler declines the event, walk up
	// the super chain and offer it to each ancestor in turn.
	// BUGFIX: the original read "processor_state = sm->cur_state->super"
	// which never advanced past the first superstate and looped forever
	// when that state also declined (or when cur_state had no super,
	// since target_state then stayed -1 with the body skipped).
	while (target_state == (struct vsfsm_state_t *)-1)
	{
		processor_state = processor_state->super;
		if (NULL == processor_state)
		{
			// even the topstate can not handle this event
			return VSFERR_NONE;
		}
		target_state = processor_state->evt_handler(sm, evt);
	}
#endif

	if ((NULL == target_state)
#if !VSFSM_CFG_HSM_EN
		|| ((struct vsfsm_state_t *)-1 == target_state)
#endif
		)
	{
		// handled, or even topstate can not handle this event
		return VSFERR_NONE;
	}

	// need to transmit
#if VSFSM_CFG_HSM_EN
	// 1. exit to processor_state
	for (temp_state = sm->cur_state; temp_state != processor_state;)
	{
		temp_state->evt_handler(sm, VSFSM_EVT_EXIT);
		temp_state = temp_state->super;
	}
	// 2. some simple transition which happens in most cases
	if ((processor_state == target_state) ||
		(processor_state->super == target_state->super))
	{
		// self-transition or sibling states: exit one, enter the other
		processor_state->evt_handler(sm, VSFSM_EVT_EXIT);
		target_state->evt_handler(sm, VSFSM_EVT_ENTER);
		goto update_cur_state;
	}
	if (processor_state->super == target_state)
	{
		// transition to the direct parent: exit only
		processor_state->evt_handler(sm, VSFSM_EVT_EXIT);
		goto update_cur_state;
	}
	if (processor_state == target_state->super)
	{
		// transition to a direct child: enter only
		target_state->evt_handler(sm, VSFSM_EVT_ENTER);
		goto update_cur_state;
	}
	// 3. find the LCA (least common ancestor) by walking both chains
	// upward in lockstep and probing with vsfsm_is_in
	lca_state = NULL;
	temp_processor_state = processor_state;
	temp_target_state = target_state;
	do
	{
		if (temp_processor_state != NULL)
		{
			if (vsfsm_is_in(temp_processor_state, target_state))
			{
				lca_state = temp_processor_state;
				break;
			}
			temp_processor_state = temp_processor_state->super;
		}
		if (temp_target_state != NULL)
		{
			if (vsfsm_is_in(temp_target_state, processor_state))
			{
				lca_state = temp_target_state;
				break;
			}
			temp_target_state = temp_target_state->super;
		}
		if ((NULL == temp_processor_state) && (NULL == temp_target_state))
		{
			// disjoint state trees: no common ancestor
			return VSFERR_BUG;
		}
	} while (NULL == lca_state);
	// 4. exit from processor_state to lca
	for (temp_state = processor_state; temp_state != lca_state;)
	{
		temp_state->evt_handler(sm, VSFSM_EVT_EXIT);
		temp_state = temp_state->super;
	}
	// 5. enter from lca to target_state
	// NOTE(review): this walks ->super upward from lca, which cannot
	// reach a target_state that lies BELOW the LCA; entry order is also
	// top-down in a textbook HSM. Kept as-is (fixing it needs a child
	// path buffer) — verify against the framework's intended semantics.
	for (temp_state = lca_state; temp_state != target_state;)
	{
		temp_state->evt_handler(sm, VSFSM_EVT_ENTER);
		temp_state = temp_state->super;
	}
	// 6. update cur_state
update_cur_state:
	sm->cur_state = target_state;
	// 7. send VSFSM_EVT_INIT to target_state
#else
	// flat SM: plain exit / switch / enter
	sm->cur_state->evt_handler(sm, VSFSM_EVT_EXIT);
	sm->cur_state = target_state;
	sm->cur_state->evt_handler(sm, VSFSM_EVT_ENTER);
#endif
	return vsfsm_post_evt(sm, VSFSM_EVT_INIT);
#endif
}
static void vsfip_dhcpc_input(void *param, struct vsfip_buffer_t *buf) { struct vsfip_dhcpc_t *dhcpc = (struct vsfip_dhcpc_t *)param; struct vsfip_netif_t *netif = dhcpc->netif; struct vsfip_dhcphead_t *head; uint8_t optlen; uint8_t *optptr; head = (struct vsfip_dhcphead_t *)buf->app.buffer; if ((head->op != DHCP_TOCLIENT) || (head->magic != SYS_TO_BE_U32(DHCP_MAGIC)) || memcmp(head->chaddr, netif->macaddr.addr.s_addr_buf, netif->macaddr.size) || (head->xid != dhcpc->xid)) { goto exit; } optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_MSGTYPE, &optptr); if (optlen != DHCPOPT_MSGTYPE_LEN) { goto exit; } switch (optptr[0]) { case DHCPOP_OFFER: dhcpc->ipaddr.size = 4; dhcpc->ipaddr.addr.s_addr = head->yiaddr; vsfsm_post_evt(&dhcpc->sm, VSFIP_DHCP_EVT_SEND_REQUEST); break; case DHCPOP_ACK: optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_LEASE_TIME, &optptr); dhcpc->leasetime = (4 == optlen) ? GET_BE_U32(optptr) : 0; optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_RENEW_TIME, &optptr); dhcpc->renew_time = (4 == optlen) ? GET_BE_U32(optptr) : 0; optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_REBINDING_TIME, &optptr); dhcpc->rebinding_time = (4 == optlen) ? GET_BE_U32(optptr) : 0; optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_SUBNETMASK, &optptr); dhcpc->netmask.size = optlen; dhcpc->netmask.addr.s_addr = (4 == optlen) ? *(uint32_t *)optptr : 0; optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_ROUTER, &optptr); dhcpc->gw.size = optlen; dhcpc->gw.addr.s_addr = (4 == optlen) ? *(uint32_t *)optptr : 0; optlen = vsfip_dhcp_get_opt(buf, DHCPOPT_DNSSERVER, &optptr); dhcpc->dns[0].size = dhcpc->dns[1].size = 0; if (optlen >= 4) { dhcpc->dns[0].size = 4; dhcpc->dns[0].addr.s_addr = *(uint32_t *)optptr; if (optlen >= 8) { dhcpc->dns[1].size = 4; dhcpc->dns[1].addr.s_addr = *(uint32_t *)(optptr + 4); } } vsfsm_post_evt(&dhcpc->sm, VSFIP_DHCP_EVT_READY); break; } exit: vsfip_buffer_release(buf); }
/* State-machine event handler for the shell.
 * On INIT it sets up the command buffer, the output critical section, the
 * stream callbacks and the input/output protothreads, then connects both
 * streams. Stream events are routed either to the internal input
 * protothread (when the shell itself owns the direction) or forwarded to
 * the state machine currently registered for that direction.
 * Always returns NULL (no state transition). */
static struct vsfsm_state_t *
vsfshell_evt_handler(struct vsfsm_t *sm, vsfsm_evt_t evt)
{
	struct vsfshell_t *shell = (struct vsfshell_t *)sm->user_data;

	switch (evt)
	{
	case VSFSM_EVT_INIT:
		shell->prompted = false;
		shell->output_interrupted = false;

		// command line accumulates into cmd_buff via tbuffer
		shell->tbuffer.buffer.buffer = (uint8_t *)shell->cmd_buff;
		shell->tbuffer.buffer.size = sizeof(shell->cmd_buff);
		shell->tbuffer.position = 0;

		vsfsm_crit_init(&shell->output_crit, VSFSHELL_EVT_OUTPUT_CRIT_AVAIL);

		// route stream notifications back to this shell instance
		shell->stream_rx->callback_rx.param = shell;
		shell->stream_rx->callback_rx.on_inout = vsfshell_streamrx_on_in;
		shell->stream_rx->callback_rx.on_connect = vsfshell_streamrx_on_txconn;
		shell->stream_tx->callback_tx.param = shell;
		shell->stream_tx->callback_tx.on_inout = vsfshell_streamtx_on_out;
		shell->stream_tx->callback_tx.on_connect = vsfshell_streamtx_on_rxconn;

		// shell->output_pt is only called by shell->input_pt
		shell->output_pt.thread = (vsfsm_pt_thread_t)vsfshell_output_thread;
		shell->output_pt.sm = sm;
		shell->output_pt.user_data = shell;

		// shell->input_pt handles the events from stream_rx
		shell->input_pt.thread = vsfshell_input_thread;
		shell->input_pt.sm = sm;
		shell->input_pt.user_data = shell;
		shell->input_pt.state = 0;
		shell->input_pt.thread(&shell->input_pt, VSFSM_EVT_INIT);

		// default input sm is shell itself
		shell->input_sm = &shell->sm;

		stream_connect_rx(shell->stream_rx);
		stream_connect_tx(shell->stream_tx);
		break;
	case VSFSHELL_EVT_STREAMRX_ONCONN:
		// nothing to do until data actually arrives
		break;
	case VSFSHELL_EVT_STREAMTX_ONCONN:
		// pass to shell->input_pt
		shell->input_pt.thread(&shell->input_pt, evt);
		break;
	case VSFSHELL_EVT_STREAMRX_ONIN:
		if (shell->input_sm == &shell->sm)
		{
			// shell owns the input: run the input protothread inline
			shell->input_pt.thread(&shell->input_pt, evt);
		}
		else if (shell->input_sm != NULL)
		{
			// a foreground command owns the input: forward the event
			vsfsm_post_evt(shell->input_sm, evt);
		}
		break;
	case VSFSHELL_EVT_STREAMTX_ONOUT:
		if (shell->output_sm == &shell->sm)
		{
			// shell owns the output: run the input protothread inline
			shell->input_pt.thread(&shell->input_pt, evt);
		}
		else if (shell->output_sm != NULL)
		{
			// forward to whichever state machine is producing output
			vsfsm_post_evt(shell->output_sm, evt);
		}
		break;
	}
	return NULL;
}