// Receive one framed message: header first, then (optionally) a payload of
// exactly messageHeader.size bytes. Returns false on any socket failure,
// on a read timeout, or when the peer sent a QUIT message (in which case
// the connection is also torn down).
bool Socket::receive( MessageHeader& messageHeader, QByteArray& message )
{
    if ( !_receive( messageHeader ))
        return false;

    // Pull the payload, waiting for more data until all announced bytes
    // have arrived or the RECEIVE_TIMEOUT_MS budget is exhausted.
    if( messageHeader.size > 0 )
    {
        message = _socket->read( messageHeader.size );
        for( ;; )
        {
            const int missing = int( messageHeader.size ) - message.size();
            if( missing <= 0 )
                break;
            if ( !_socket->waitForReadyRead( RECEIVE_TIMEOUT_MS ))
                return false;
            message.append( _socket->read( missing ));
        }
    }

    // A QUIT message ends the session; report it as "no message received".
    if( messageHeader.type == MESSAGE_TYPE_QUIT )
    {
        _socket->disconnectFromHost();
        return false;
    }
    return true;
}
// Dispatch an incoming message to this widget. For pointer and keyboard
// input messages, a repost to the parent widget is registered *before*
// _receive() runs, so that subclasses processing the message can still
// swallow the repost.
void Widget::receive( Msg * pMsg )
{
    const auto msgType = pMsg->type();

    const bool isInputMsg =
        msgType == MsgType::MouseMove        ||
        msgType == MsgType::MousePress       ||
        msgType == MsgType::MouseRepeat      ||
        msgType == MsgType::MouseDrag        ||
        msgType == MsgType::MouseRelease     ||
        msgType == MsgType::MouseClick       ||
        msgType == MsgType::MouseDoubleClick ||
        msgType == MsgType::KeyPress         ||
        msgType == MsgType::KeyRepeat        ||
        msgType == MsgType::KeyRelease       ||
        msgType == MsgType::WheelRoll;

    if( isInputMsg )
    {
        Widget_p pParent = parent();
        if( pParent )
            pMsg->setRepost(pParent,pParent);
    }

    _receive( pMsg );
}
// Try to complete the packet currently being assembled from m_buf.
// packet_ptr->complete() reports: -1 on error, 0 when the buffered bytes
// form a full packet, or N > 0 when N more bytes are still required.
// On a full packet, parse it, queue it, and re-arm the read chain via
// _receive(); otherwise schedule an async read for the missing bytes,
// which calls back into _complete() with the same packet.
void RealmConnection::_complete_packet(PacketPtr packet_ptr)
{
	UT_DEBUGMSG(("RealmConnection::_complete_packet()\n"));
	int bytes_needed = packet_ptr->complete(m_buf.data(), m_buf.size());
	switch (bytes_needed)
	{
		case -1:
			// Completion state could not be determined; drop this packet.
			UT_DEBUGMSG(("Error determining packet (type: 0x%x) completion state!\n", packet_ptr->type()));
			return;
		case 0:
			{
				UT_DEBUGMSG(("Read full packet\n"));
				// Bail out (debug-assert) if the fully-buffered packet fails to parse.
				UT_return_if_fail(packet_ptr->parse(m_buf.data(), m_buf.size()) != -1);
				m_packet_queue.push(packet_ptr);
				// Presumably starts reading the next realm message — TODO confirm.
				_receive();
			}
			break;
		default:
			UT_DEBUGMSG(("Need more data (%d bytes) for this packet...\n", bytes_needed));
			// read the needed number of bytes
			char* ptr = m_buf.prepare(bytes_needed);
			asio::async_read(m_socket,
				asio::buffer(ptr, bytes_needed),
				boost::bind(&RealmConnection::_complete, shared_from_this(),
					asio::placeholders::error,
					asio::placeholders::bytes_transferred,
					packet_ptr));
			break;
	}
}
/*
 * Send the SMTP greeting command and record the client name on S.
 * EHLO (extended SMTP) is attempted first so that _parseFlags can pick up
 * the server's capability list from the 250 reply; if EHLO is rejected,
 * fall back to plain HELO. A failure of HELO as well propagates out of
 * the TRY block to the caller. On success the state advances to SMTP_Helo.
 */
void SMTP_helo(T S, const char *name) {
        ASSERT(S);
        S->name = name;
        _send(S, "EHLO %s\r\n", name);
        TRY
        {
                /* 250 reply carries extensions; _parseFlags records them in S->flags */
                _receive(S, 250, _parseFlags);
        }
        ELSE
        {
                // If EHLO failed, fallback to HELO, but if it fails too, let the exception bubble up
                _send(S, "HELO %s\r\n", name);
                _receive(S, 250, NULL);
        }
        END_TRY;
        S->state = SMTP_Helo;
}
/*
 * Event loop of the RPL thread.
 *
 * Blocks on msg_receive() and dispatches on the message type:
 * lifetime updates, trickle timer events (interval/callback), and
 * received RPL packets (GNRC_NETAPI_MSG_TYPE_RCV → _receive()).
 * Unsupported get/set requests are answered with -ENOTSUP; SND is
 * accepted but ignored. Never returns.
 */
static void *_event_loop(void *args)
{
    msg_t msg, reply;

    (void)args;
    msg_init_queue(_msg_q, GNRC_RPL_MSG_QUEUE_SIZE);

    /* preinitialize ACK reply used for all get/set answers */
    reply.type = GNRC_NETAPI_MSG_TYPE_ACK;

    trickle_t *trickle;

    /* start event loop */
    while (1) {
        DEBUG("RPL: waiting for incoming message.\n");
        msg_receive(&msg);

        switch (msg.type) {
            case GNRC_RPL_MSG_TYPE_LIFETIME_UPDATE:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_LIFETIME_UPDATE received\n");
                _update_lifetime();
                break;
            case GNRC_RPL_MSG_TYPE_TRICKLE_INTERVAL:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_TRICKLE_INTERVAL received\n");
                trickle = msg.content.ptr;
                /* only fire if the trickle instance is still active (has a callback) */
                if (trickle && (trickle->callback.func != NULL)) {
                    trickle_interval(trickle);
                }
                break;
            case GNRC_RPL_MSG_TYPE_TRICKLE_CALLBACK:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_TRICKLE_CALLBACK received\n");
                trickle = msg.content.ptr;
                if (trickle && (trickle->callback.func != NULL)) {
                    trickle_callback(trickle);
                }
                break;
            case GNRC_NETAPI_MSG_TYPE_RCV:
                DEBUG("RPL: GNRC_NETAPI_MSG_TYPE_RCV received\n");
                _receive(msg.content.ptr);
                break;
            case GNRC_NETAPI_MSG_TYPE_SND:
                /* sending is not handled by this thread; silently ignored */
                break;
            case GNRC_NETAPI_MSG_TYPE_GET:
            case GNRC_NETAPI_MSG_TYPE_SET:
                DEBUG("RPL: reply to unsupported get/set\n");
                reply.content.value = -ENOTSUP;
                msg_reply(&msg, &reply);
                break;
            default:
                break;
        }
    }

    return NULL;
}
/*
 * Non-blocking receive from a RTAI message queue.
 *
 * Tries to take the "receivers" and "received" semaphores without
 * blocking. If either is unavailable the call backs out (re-signalling
 * "receivers" if it was already taken) and returns msg_size unchanged,
 * which by this API's convention signals that no message was consumed.
 * Otherwise delegates to _receive() to copy the message out.
 *
 * mq       - the message queue
 * msg      - destination buffer for the message body
 * msg_size - size of the destination buffer
 * msgpri   - out: priority of the received message
 * space    - address-space flag (kernel vs. user copy) passed through
 *
 * Fix: removed a stray empty statement (";") that preceded the
 * rt_sem_signal() call in the back-out path.
 */
RTAI_SYSCALL_MODE int _rt_msg_receive_if(RT_MSGQ *mq, void *msg, int msg_size, int *msgpri, int space)
{
	if (rt_sem_wait_if(&mq->receivers) <= 0) {
		return msg_size;
	}
	if (rt_sem_wait_if(&mq->received) <= 0) {
		/* nothing queued: release the receivers slot we just took */
		rt_sem_signal(&mq->receivers);
		return msg_size;
	}
	return _receive(mq, msg, msg_size, msgpri, space);
}
/*
 * Timed receive from a RTAI message queue: wait on the "receivers" and
 * "received" semaphores up to the absolute time 'until'. On a semaphore
 * error (>= RTE_LOWERR) back out — re-signalling "receivers" if it was
 * already acquired — and return via TBX_RET; otherwise hand off to
 * _receive() to copy the message out.
 */
RTAI_SYSCALL_MODE int _rt_msg_receive_until(RT_MSGQ *mq, void *msg, int msg_size, int *msgpri, RTIME until, int space)
{
	int err;

	err = rt_sem_wait_until(&mq->receivers, until);
	if (err >= RTE_LOWERR) {
		return TBX_RET(msg_size, err);
	}

	err = rt_sem_wait_until(&mq->received, until);
	if (err >= RTE_LOWERR) {
		/* timed out waiting for a message: give back the receivers slot */
		rt_sem_signal(&mq->receivers);
		return TBX_RET(msg_size, err);
	}

	return _receive(mq, msg, msg_size, msgpri, space);
}
/*
 * Event loop of the 6LoWPAN thread.
 *
 * Registers for all 6LoWPAN packets, then blocks on msg_receive() and
 * dispatches: RCV → _receive(), SND → _send(), get/set → -ENOTSUP ACK,
 * and (with MODULE_GNRC_SIXLOWPAN_FRAG) fragment-send events. Never
 * returns.
 */
static void *_event_loop(void *args)
{
    msg_t msg, reply, msg_q[GNRC_SIXLOWPAN_MSG_QUEUE_SIZE];
    gnrc_netreg_entry_t me_reg = GNRC_NETREG_ENTRY_INIT_PID(GNRC_NETREG_DEMUX_CTX_ALL,
                                                            sched_active_pid);

    (void)args;
    msg_init_queue(msg_q, GNRC_SIXLOWPAN_MSG_QUEUE_SIZE);

    /* register interest in all 6LoWPAN packets */
    gnrc_netreg_register(GNRC_NETTYPE_SIXLOWPAN, &me_reg);

    /* preinitialize ACK */
    reply.type = GNRC_NETAPI_MSG_TYPE_ACK;

    /* start event loop */
    while (1) {
        DEBUG("6lo: waiting for incoming message.\n");
        msg_receive(&msg);

        switch (msg.type) {
            case GNRC_NETAPI_MSG_TYPE_RCV:
                DEBUG("6lo: GNRC_NETDEV_MSG_TYPE_RCV received\n");
                _receive(msg.content.ptr);
                break;
            case GNRC_NETAPI_MSG_TYPE_SND:
                DEBUG("6lo: GNRC_NETDEV_MSG_TYPE_SND received\n");
                _send(msg.content.ptr);
                break;
            case GNRC_NETAPI_MSG_TYPE_GET:
            case GNRC_NETAPI_MSG_TYPE_SET:
                DEBUG("6lo: reply to unsupported get/set\n");
                reply.content.value = -ENOTSUP;
                msg_reply(&msg, &reply);
                break;
#ifdef MODULE_GNRC_SIXLOWPAN_FRAG
            case GNRC_SIXLOWPAN_MSG_FRAG_SND:
                /* timer-driven continuation of a fragmented send */
                DEBUG("6lo: send fragmented event received\n");
                gnrc_sixlowpan_frag_send(msg.content.ptr);
                break;
#endif
            default:
                DEBUG("6lo: operation not supported\n");
                break;
        }
    }

    return NULL;
}
/*
 * Event loop of the (legacy "ng") 6LoWPAN thread.
 *
 * Registers for all 6LoWPAN packets, then blocks on msg_receive() and
 * dispatches: RCV → _receive(), SND → _send(), get/set → -ENOTSUP ACK.
 * Never returns.
 */
static void *_event_loop(void *args)
{
    msg_t msg, reply, msg_q[NG_SIXLOWPAN_MSG_QUEUE_SIZE];
    ng_netreg_entry_t me_reg;

    (void)args;
    msg_init_queue(msg_q, NG_SIXLOWPAN_MSG_QUEUE_SIZE);

    me_reg.demux_ctx = NG_NETREG_DEMUX_CTX_ALL;
    me_reg.pid = thread_getpid();

    /* register interest in all 6LoWPAN packets */
    ng_netreg_register(NG_NETTYPE_SIXLOWPAN, &me_reg);

    /* preinitialize ACK */
    reply.type = NG_NETAPI_MSG_TYPE_ACK;

    /* start event loop */
    while (1) {
        DEBUG("6lo: waiting for incoming message.\n");
        msg_receive(&msg);

        switch (msg.type) {
            case NG_NETAPI_MSG_TYPE_RCV:
                DEBUG("6lo: NG_NETDEV_MSG_TYPE_RCV received\n");
                _receive((ng_pktsnip_t *)msg.content.ptr);
                break;
            case NG_NETAPI_MSG_TYPE_SND:
                DEBUG("6lo: NG_NETDEV_MSG_TYPE_SND received\n");
                _send((ng_pktsnip_t *)msg.content.ptr);
                break;
            case NG_NETAPI_MSG_TYPE_GET:
            case NG_NETAPI_MSG_TYPE_SET:
                DEBUG("6lo: reply to unsupported get/set\n");
                reply.content.value = -ENOTSUP;
                msg_reply(&msg, &reply);
                break;
            default:
                DEBUG("6lo: operation not supported\n");
                break;
        }
    }

    return NULL;
}
/*
 * Strip the encapsulating IPv6 header(s) from pkt and hand the inner
 * packet back to _receive() for regular IPv6 processing.
 *
 * The head snip is temporarily retyped to UNDEF so that removing the
 * outer IPv6 snip(s) cannot take the encapsulated payload with it; the
 * IPv6 type is restored before re-dispatch.
 */
static void _decapsulate(gnrc_pktsnip_t *pkt)
{
    /* prevent the payload (the encapsulated packet) from being removed */
    pkt->type = GNRC_NETTYPE_UNDEF;

    /* drop every directly-following IPv6 header snip */
    while ((pkt->next != NULL) && (pkt->next->type == GNRC_NETTYPE_IPV6)) {
        gnrc_pktbuf_remove_snip(pkt, pkt->next);
    }

    pkt->type = GNRC_NETTYPE_IPV6;
    _receive(pkt);
}
/*
 * Upgrade the SMTP session to TLS via STARTTLS (RFC 3207).
 * Requires that the server advertised STARTTLS (MTA_StartTLS flag set
 * by the EHLO response); otherwise an IOException is thrown. After the
 * TLS handshake the session state and capability flags are reset and
 * EHLO is re-sent, as required by RFC 3207 section 4.2. On success the
 * state advances to SMTP_StartTLS.
 */
void SMTP_starttls(T S, SslOptions_T options) {
        ASSERT(S);
        if (S->flags & MTA_StartTLS) {
                _send(S, "STARTTLS\r\n");
                _receive(S, 220, NULL);
                // Switch to TLS
                Socket_enableSsl(S->socket, options, NULL);
                // Reset state and flags and send EHLO again (see RFC 3207 section 4.2)
                S->flags = MTA_None;
                S->state = SMTP_Greeting;
                SMTP_helo(S, S->name);
        } else {
                THROW(IOException, "STARTTLS required but the mail server doesn't support it");
        }
        S->state = SMTP_StartTLS;
}
/*
 * Event loop of the (legacy "ng") UDP thread.
 *
 * Registers for all UDP packets, then blocks on msg_receive() and
 * dispatches: RCV → _receive(), SND → _send(), get/set → -ENOTSUP ACK.
 * Never returns.
 */
static void *_event_loop(void *arg)
{
    (void)arg;
    msg_t msg, reply;
    msg_t msg_queue[NG_UDP_MSG_QUEUE_SIZE];
    ng_netreg_entry_t netreg;

    /* preset reply message */
    reply.type = NG_NETAPI_MSG_TYPE_ACK;
    reply.content.value = (uint32_t)-ENOTSUP;

    /* initialize message queue */
    msg_init_queue(msg_queue, NG_UDP_MSG_QUEUE_SIZE);

    /* register UDP at netreg */
    netreg.demux_ctx = NG_NETREG_DEMUX_CTX_ALL;
    netreg.pid = thread_getpid();
    ng_netreg_register(NG_NETTYPE_UDP, &netreg);

    /* dispatch NETAPI messages */
    while (1) {
        msg_receive(&msg);

        switch (msg.type) {
            case NG_NETAPI_MSG_TYPE_RCV:
                DEBUG("udp: NG_NETAPI_MSG_TYPE_RCV\n");
                _receive((ng_pktsnip_t *)msg.content.ptr);
                break;
            case NG_NETAPI_MSG_TYPE_SND:
                DEBUG("udp: NG_NETAPI_MSG_TYPE_SND\n");
                _send((ng_pktsnip_t *)msg.content.ptr);
                break;
            case NG_NETAPI_MSG_TYPE_SET:
            case NG_NETAPI_MSG_TYPE_GET:
                /* get/set not supported: answer with the preset -ENOTSUP ACK */
                msg_reply(&msg, &reply);
                break;
            default:
                DEBUG("udp: received unidentified message\n");
                break;
        }
    }

    /* never reached */
    return NULL;
}
void SMTP_auth(T S, const char *username, const char *password) { ASSERT(S); ASSERT(username); ASSERT(password); char buffer[STRLEN] = {}; // PLAIN has precedence if (S->flags & MTA_AuthPlain) { int len = snprintf(buffer, STRLEN, "%c%s%c%s", '\0', username, '\0', password); char *b64 = encode_base64(len, (unsigned char *)buffer); TRY { _send(S, "AUTH PLAIN %s\r\n", b64); _receive(S, 235, NULL); } FINALLY { FREE(b64); } END_TRY; } else if (S->flags & MTA_AuthLogin) {
// Accept loop of the transfer-group thread: accept up to 'count'
// incoming connections on rcvPort and hand each socket to _receive().
// The section lock 'sect' is held for the whole loop except around the
// blocking accept() call, which runs under CriticalUnblock so other
// threads (e.g. an aborter) can take the lock meanwhile.
void CThorTransferGroup::main()
{
    CriticalBlock block(sect);
    acceptListener = ISocket::create(rcvPort);
    while (count&&!aborted) // not yet!
    {
        ISocket *sock;
        {
            // release 'sect' while blocked in accept()
            CriticalUnblock unblock(sect);
            sock = acceptListener->accept(true);
            if (!sock)
                break;
        }
        // re-check under the lock: we may have been aborted during accept
        if (aborted)
            break;
        // NOTE(review): _receive() presumably takes ownership of 'sock' — confirm.
        _receive(sock);
        count--;
    }
    acceptListener->Release();
    acceptListener = NULL;
}
/*
 * Event loop of the GNRC IPv6 thread.
 *
 * Registers for all IPv6 packets, then blocks on msg_receive() and
 * dispatches: RCV → _receive(), SND → _send(), get/set → -ENOTSUP ACK,
 * plus (depending on compiled-in modules) NDP and 6LoWPAN-ND timer
 * events. Never returns.
 */
static void *_event_loop(void *args)
{
    msg_t msg, reply, msg_q[GNRC_IPV6_MSG_QUEUE_SIZE];
    gnrc_netreg_entry_t me_reg;

    (void)args;
    msg_init_queue(msg_q, GNRC_IPV6_MSG_QUEUE_SIZE);

    me_reg.demux_ctx = GNRC_NETREG_DEMUX_CTX_ALL;
    me_reg.pid = thread_getpid();

    /* register interest in all IPv6 packets */
    gnrc_netreg_register(GNRC_NETTYPE_IPV6, &me_reg);

    /* preinitialize ACK */
    reply.type = GNRC_NETAPI_MSG_TYPE_ACK;

    /* start event loop */
    while (1) {
        DEBUG("ipv6: waiting for incoming message.\n");
        msg_receive(&msg);

        switch (msg.type) {
            case GNRC_NETAPI_MSG_TYPE_RCV:
                DEBUG("ipv6: GNRC_NETAPI_MSG_TYPE_RCV received\n");
                _receive(msg.content.ptr);
                break;
            case GNRC_NETAPI_MSG_TYPE_SND:
                DEBUG("ipv6: GNRC_NETAPI_MSG_TYPE_SND received\n");
                _send(msg.content.ptr, true);
                break;
            case GNRC_NETAPI_MSG_TYPE_GET:
            case GNRC_NETAPI_MSG_TYPE_SET:
                DEBUG("ipv6: reply to unsupported get/set\n");
                reply.content.value = -ENOTSUP;
                msg_reply(&msg, &reply);
                break;
#ifdef MODULE_GNRC_NDP
            case GNRC_NDP_MSG_RTR_TIMEOUT:
                DEBUG("ipv6: Router timeout received\n");
                /* neighbor stopped acting as a router: clear its router flag */
                ((gnrc_ipv6_nc_t *)msg.content.ptr)->flags &= ~GNRC_IPV6_NC_IS_ROUTER;
                break;
            /* XXX reactivate when https://github.com/RIOT-OS/RIOT/issues/5122 is
             * solved properly */
            /* case GNRC_NDP_MSG_ADDR_TIMEOUT: */
            /*     DEBUG("ipv6: Router advertisement timer event received\n"); */
            /*     gnrc_ipv6_netif_remove_addr(KERNEL_PID_UNDEF, */
            /*                                 msg.content.ptr); */
            /*     break; */
            case GNRC_NDP_MSG_NBR_SOL_RETRANS:
                DEBUG("ipv6: Neigbor solicitation retransmission timer event received\n");
                gnrc_ndp_retrans_nbr_sol(msg.content.ptr);
                break;
            case GNRC_NDP_MSG_NC_STATE_TIMEOUT:
                DEBUG("ipv6: Neigbor cache state timeout received\n");
                gnrc_ndp_state_timeout(msg.content.ptr);
                break;
#endif
#ifdef MODULE_GNRC_NDP_ROUTER
            case GNRC_NDP_MSG_RTR_ADV_RETRANS:
                DEBUG("ipv6: Router advertisement retransmission event received\n");
                gnrc_ndp_router_retrans_rtr_adv(msg.content.ptr);
                break;
            case GNRC_NDP_MSG_RTR_ADV_DELAY:
                DEBUG("ipv6: Delayed router advertisement event received\n");
                gnrc_ndp_router_send_rtr_adv(msg.content.ptr);
                break;
#endif
#ifdef MODULE_GNRC_NDP_HOST
            case GNRC_NDP_MSG_RTR_SOL_RETRANS:
                DEBUG("ipv6: Router solicitation retransmission event received\n");
                gnrc_ndp_host_retrans_rtr_sol(msg.content.ptr);
                break;
#endif
#ifdef MODULE_GNRC_SIXLOWPAN_ND
            case GNRC_SIXLOWPAN_ND_MSG_MC_RTR_SOL:
                DEBUG("ipv6: Multicast router solicitation event received\n");
                gnrc_sixlowpan_nd_mc_rtr_sol(msg.content.ptr);
                break;
            case GNRC_SIXLOWPAN_ND_MSG_UC_RTR_SOL:
                DEBUG("ipv6: Unicast router solicitation event received\n");
                gnrc_sixlowpan_nd_uc_rtr_sol(msg.content.ptr);
                break;
# ifdef MODULE_GNRC_SIXLOWPAN_CTX
            case GNRC_SIXLOWPAN_ND_MSG_DELETE_CTX:
                DEBUG("ipv6: Delete 6LoWPAN context event received\n");
                /* extract the context ID from the flags field and remove it */
                gnrc_sixlowpan_ctx_remove(((((gnrc_sixlowpan_ctx_t *)msg.content.ptr)->flags_id) &
                                           GNRC_SIXLOWPAN_CTX_FLAGS_CID_MASK));
                break;
# endif
#endif
#ifdef MODULE_GNRC_SIXLOWPAN_ND_ROUTER
            case GNRC_SIXLOWPAN_ND_MSG_ABR_TIMEOUT:
                DEBUG("ipv6: border router timeout event received\n");
                gnrc_sixlowpan_nd_router_abr_remove(msg.content.ptr);
                break;
            /* XXX reactivate when https://github.com/RIOT-OS/RIOT/issues/5122 is
             * solved properly */
            /* case GNRC_SIXLOWPAN_ND_MSG_AR_TIMEOUT: */
            /*     DEBUG("ipv6: address registration timeout received\n"); */
            /*     gnrc_sixlowpan_nd_router_gc_nc(msg.content.ptr); */
            /*     break; */
            case GNRC_NDP_MSG_RTR_ADV_SIXLOWPAN_DELAY:
                DEBUG("ipv6: Delayed router advertisement event received\n");
                gnrc_ipv6_nc_t *nc_entry = msg.content.ptr;
                gnrc_ndp_internal_send_rtr_adv(nc_entry->iface, NULL,
                                               &(nc_entry->ipv6_addr), false);
                break;
#endif
            default:
                break;
        }
    }

    return NULL;
}
/*
 * call-seq:
 *	mq.shift([buffer, [timeout]])	=> message
 *
 * Takes the highest priority message off the queue and returns
 * the message as a String.
 *
 * If the optional +buffer+ is present, then it must be a String
 * which will receive the data.
 *
 * If the optional +timeout+ is present, then it may be a Float
 * or Integer specifying the timeout in seconds.  Errno::ETIMEDOUT
 * will be raised if +timeout+ has elapsed and there are no messages
 * in the queue.
 *
 * On some older systems, the +timeout+ argument is not currently
 * supported and may raise NotImplementedError if +timeout+ is used.
 */
static VALUE shift(int argc, VALUE *argv, VALUE self)
{
	/* flags == 0: default mode — return the message body only */
	return _receive(0, argc, argv, self);
}
/*
 * call-seq:
 *	mq.receive([buffer, [timeout]])		=>  [ message, priority ]
 *
 * Takes the highest priority message off the queue and returns
 * an array containing the message as a String and the Integer
 * priority of the message.
 *
 * If the optional +buffer+ is present, then it must be a String
 * which will receive the data.
 *
 * If the optional +timeout+ is present, then it may be a Float
 * or Integer specifying the timeout in seconds.  Errno::ETIMEDOUT
 * will be raised if +timeout+ has elapsed and there are no messages
 * in the queue.
 *
 * On some older systems, the +timeout+ argument is not currently
 * supported and may raise NotImplementedError if +timeout+ is used.
 */
static VALUE receive(int argc, VALUE *argv, VALUE self)
{
	/* PMQ_WANTARRAY: return [message, priority] instead of message only */
	return _receive(PMQ_WANTARRAY, argc, argv, self);
}
/*
 * call-seq:
 *	mq.tryshift([buffer [, timeout]])	=> message or nil
 *
 * Exactly like POSIX_MQ#shift, except it returns +nil+ instead of raising
 * Errno::EAGAIN when non-blocking operation is desired.
 *
 * This does not guarantee non-blocking behavior, the message queue must
 * be made non-blocking before calling this method.
 */
static VALUE tryshift(int argc, VALUE *argv, VALUE self)
{
	/* PMQ_TRY: map EAGAIN to nil rather than raising */
	return _receive(PMQ_TRY, argc, argv, self);
}
/* internal functions */ static void *_event_loop(void *args) { msg_t msg, reply, msg_q[NG_IPV6_MSG_QUEUE_SIZE]; ng_netreg_entry_t me_reg; (void)args; msg_init_queue(msg_q, NG_IPV6_MSG_QUEUE_SIZE); me_reg.demux_ctx = NG_NETREG_DEMUX_CTX_ALL; me_reg.pid = thread_getpid(); /* register interest in all IPv6 packets */ ng_netreg_register(NG_NETTYPE_IPV6, &me_reg); /* preinitialize ACK */ reply.type = NG_NETAPI_MSG_TYPE_ACK; /* start event loop */ while (1) { DEBUG("ipv6: waiting for incoming message.\n"); msg_receive(&msg); switch (msg.type) { case NG_NETAPI_MSG_TYPE_RCV: DEBUG("ipv6: NG_NETAPI_MSG_TYPE_RCV received\n"); _receive((ng_pktsnip_t *)msg.content.ptr); break; case NG_NETAPI_MSG_TYPE_SND: DEBUG("ipv6: NG_NETAPI_MSG_TYPE_SND received\n"); _send((ng_pktsnip_t *)msg.content.ptr, true); break; case NG_NETAPI_MSG_TYPE_GET: case NG_NETAPI_MSG_TYPE_SET: DEBUG("ipv6: reply to unsupported get/set\n"); reply.content.value = -ENOTSUP; msg_reply(&msg, &reply); break; case NG_NDP_MSG_RTR_TIMEOUT: DEBUG("ipv6: Router timeout received\n"); ((ng_ipv6_nc_t *)msg.content.ptr)->flags &= ~NG_IPV6_NC_IS_ROUTER; break; case NG_NDP_MSG_ADDR_TIMEOUT: DEBUG("ipv6: Router advertisement timer event received\n"); ng_ipv6_netif_remove_addr(KERNEL_PID_UNDEF, (ng_ipv6_addr_t *)msg.content.ptr); break; case NG_NDP_MSG_NBR_SOL_RETRANS: DEBUG("ipv6: Neigbor solicitation retransmission timer event received\n"); ng_ndp_retrans_nbr_sol((ng_ipv6_nc_t *)msg.content.ptr); break; case NG_NDP_MSG_NC_STATE_TIMEOUT: DEBUG("ipv6: Neigbor cace state timeout received\n"); ng_ndp_state_timeout((ng_ipv6_nc_t *)msg.content.ptr); break; default: break; } } return NULL; }
int receive_transaction_packet(int s, struct QItem *TxnQItem) { void *data = NULL; int length = 0; int interaction_q_slot_id = -1; int interaction; int rec; /* Receive transaction type. */ if ((rec = _receive(s, (void *) &interaction, sizeof(interaction))) == -1) { LOG_ERROR_MESSAGE("cannot receive interaction type"); return ERROR; } if (rec == SOCKET_CLOSE) return SOCKET_CLOSE; TxnQItem->TxnType = interaction; /* Receive transaction data. */ switch (interaction) { case ADMIN_CONFIRM: interaction_q_slot_id = PinSlot(&app_admin_confirm_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_admin_confirm_array.data_array[interaction_q_slot_id].admin_confirm_data); length = sizeof(struct admin_confirm_t); break; case ADMIN_REQUEST: interaction_q_slot_id = PinSlot(&app_admin_request_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_admin_request_array.data_array[interaction_q_slot_id].admin_request_data); length = sizeof(struct admin_request_t); break; case BEST_SELLERS: interaction_q_slot_id = PinSlot(&app_best_sellers_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_best_sellers_array.data_array[interaction_q_slot_id].best_sellers_data); length = sizeof(struct best_sellers_t); break; case BUY_CONFIRM: interaction_q_slot_id = PinSlot(&app_buy_confirm_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_buy_confirm_array.data_array[interaction_q_slot_id].buy_confirm_data); length = sizeof(struct buy_confirm_t); break; case BUY_REQUEST: interaction_q_slot_id = PinSlot(&app_buy_request_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_buy_request_array.data_array[interaction_q_slot_id].buy_request_data); length = sizeof(struct buy_request_t); break; case HOME: interaction_q_slot_id = 
PinSlot(&app_home_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_home_array.data_array[interaction_q_slot_id].home_data); length = sizeof(struct home_t); break; case NEW_PRODUCTS: interaction_q_slot_id = PinSlot(&app_new_products_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_new_products_array.data_array[interaction_q_slot_id].new_products_data); length = sizeof(struct new_products_t); break; case ORDER_DISPLAY: interaction_q_slot_id = PinSlot(&app_order_display_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_order_display_array.data_array[interaction_q_slot_id].order_display_data); length = sizeof(struct order_display_t); break; case ORDER_INQUIRY: interaction_q_slot_id = PinSlot(&app_order_inquiry_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_order_inquiry_array.data_array[interaction_q_slot_id].order_inquiry_data); length = sizeof(struct order_inquiry_t); break; case PRODUCT_DETAIL: interaction_q_slot_id = PinSlot(&app_product_detail_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_product_detail_array.data_array[interaction_q_slot_id].product_detail_data); length = sizeof(struct product_detail_t); break; case SEARCH_REQUEST: interaction_q_slot_id = PinSlot(&app_search_request_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_search_request_array.data_array[interaction_q_slot_id].search_request_data); length = sizeof(struct search_request_t); break; case SEARCH_RESULTS: interaction_q_slot_id = PinSlot(&app_search_results_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = 
&(app_search_results_array.data_array[interaction_q_slot_id].search_results_data); length = sizeof(struct search_results_t); break; case SHOPPING_CART: interaction_q_slot_id = PinSlot(&app_shopping_cart_array); if (interaction_q_slot_id == -1) { LOG_ERROR_MESSAGE("PinSlot failed"); return ERROR; } data = &(app_shopping_cart_array.data_array[interaction_q_slot_id].shopping_cart_data); length = sizeof(struct shopping_cart_t); break; } TxnQItem->SlotID = interaction_q_slot_id; if (_receive(s, data, length) == -1) { LOG_ERROR_MESSAGE("cannot receive interaction data"); return ERROR; } return OK; }
/*
 * Event loop of the RPL thread.
 *
 * Blocks on msg_receive() and dispatches: lifetime updates, trickle
 * timer events, delayed DAO handling, DODAG cleanup, and received RPL
 * packets (GNRC_NETAPI_MSG_TYPE_RCV → _receive()). Unsupported
 * snd/get/set requests are answered with -ENOTSUP. Never returns.
 */
static void *_event_loop(void *args)
{
    msg_t msg, reply;

    (void)args;
    msg_init_queue(_msg_q, GNRC_RPL_MSG_QUEUE_SIZE);

    /* preinitialize ACK reply used for all unsupported requests */
    reply.type = GNRC_NETAPI_MSG_TYPE_ACK;

    trickle_t *trickle;
    gnrc_rpl_dodag_t *dodag;

    /* start event loop */
    while (1) {
        DEBUG("RPL: waiting for incoming message.\n");
        msg_receive(&msg);

        switch (msg.type) {
            case GNRC_RPL_MSG_TYPE_LIFETIME_UPDATE:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_LIFETIME_UPDATE received\n");
                _update_lifetime();
                break;
            case GNRC_RPL_MSG_TYPE_TRICKLE_INTERVAL:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_TRICKLE_INTERVAL received\n");
                trickle = (trickle_t *) msg.content.ptr;
                /* only fire if the trickle instance is still active (has a callback) */
                if (trickle && (trickle->callback.func != NULL)) {
                    trickle_interval(trickle);
                }
                break;
            case GNRC_RPL_MSG_TYPE_TRICKLE_CALLBACK:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_TRICKLE_CALLBACK received\n");
                trickle = (trickle_t *) msg.content.ptr;
                if (trickle && (trickle->callback.func != NULL)) {
                    trickle_callback(trickle);
                }
                break;
            case GNRC_RPL_MSG_TYPE_DAO_HANDLE:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_DAO_HANDLE received\n");
                dodag = (gnrc_rpl_dodag_t *) msg.content.ptr;
                /* skip DODAGs that have been deinitialized meanwhile */
                if (dodag && (dodag->state != 0)) {
                    _dao_handle_send(dodag);
                }
                break;
            case GNRC_RPL_MSG_TYPE_CLEANUP_HANDLE:
                DEBUG("RPL: GNRC_RPL_MSG_TYPE_CLEANUP received\n");
                dodag = (gnrc_rpl_dodag_t *) msg.content.ptr;
                if (dodag && (dodag->state != 0) && (dodag->parents == NULL)) {
                    /* no parents - delete this DODAG */
                    gnrc_rpl_dodag_remove(dodag);
                }
                break;
            case GNRC_NETAPI_MSG_TYPE_RCV:
                DEBUG("RPL: GNRC_NETAPI_MSG_TYPE_RCV received\n");
                _receive((gnrc_pktsnip_t *)msg.content.ptr);
                break;
            case GNRC_NETAPI_MSG_TYPE_SND:
            case GNRC_NETAPI_MSG_TYPE_GET:
            case GNRC_NETAPI_MSG_TYPE_SET:
                DEBUG("RPL: reply to unsupported recv/get/set\n");
                reply.content.value = -ENOTSUP;
                msg_reply(&msg, &reply);
                break;
            default:
                break;
        }
    }

    return NULL;
}
bool RealmConnection::connect() { UT_DEBUGMSG(("RealmConnection::connect()\n")); UT_return_val_if_fail(!m_thread_ptr, false); try { std::string address = m_address; int port = m_port; if (m_tls) { // setup our local TLS tunnel to the realm m_tls_tunnel_ptr.reset(new tls_tunnel::ClientProxy(m_address, m_port, m_ca_file, false)); m_tls_tunnel_ptr->setup(); asio::thread thread(boost::bind(&tls_tunnel::ClientProxy::run, m_tls_tunnel_ptr)); // make sure we connect to the tunnel, and not directly to the realm address = m_tls_tunnel_ptr->local_address(); port = m_tls_tunnel_ptr->local_port(); } // connect! asio::ip::tcp::resolver::query query(address, boost::lexical_cast<std::string>(port)); asio::ip::tcp::resolver resolver(m_io_service); asio::ip::tcp::resolver::iterator iterator(resolver.resolve(query)); bool connected = false; asio::error_code error_code; while (iterator != asio::ip::tcp::resolver::iterator()) { try { m_socket.connect(*iterator); connected = true; break; } catch (asio::system_error se) { error_code = se.code(); try { m_socket.close(); } catch(...) {} } iterator++; } if (!connected) { UT_DEBUGMSG(("Error connecting to realm: %s", asio::system_error(error_code).what())); return false; } } catch (tls_tunnel::Exception& e) { UT_DEBUGMSG(("tls_tunnel exception connecting to realm: %s\n", e.message().c_str())); return false; } catch (asio::system_error& se) { UT_DEBUGMSG(("Error connecting to realm: %s\n", se.what())); return false; } catch (...) { UT_DEBUGMSG(("Error connecting to realm!\n")); return false; } if (!_login()) { UT_DEBUGMSG(("RealmConnection login failed!\n")); _disconnect(); return false; } UT_DEBUGMSG(("RealmConnection connected\n")); // start reading realm messages _receive(); m_thread_ptr.reset(new asio::thread(boost::bind(&asio::io_service::run, &m_io_service))); return true; }
/*
 * Read the server's initial SMTP greeting and require a 220 reply
 * (a non-220 reply raises out of _receive). On success the session
 * state advances to SMTP_Greeting.
 */
void SMTP_greeting(T S) {
        ASSERT(S);
        _receive(S, 220, NULL);
        S->state = SMTP_Greeting;
}