/*
 * espconn "sent" callback: the previous TCP write has completed.
 * Clears the in-flight timers and drops any pending-queue head that
 * needs no acknowledgement from the broker (QoS-0 PUBLISH, PUBACK,
 * PUBCOMP, PINGREQ). For QoS-0 PUBLISH it also fires the Lua puback
 * callback, since no broker ACK will ever arrive.
 */
static void mqtt_socket_sent(void *arg)
{
  NODE_DBG("enter mqtt_socket_sent.\n");
  struct espconn *pesp_conn = arg;
  if(pesp_conn == NULL)
    return;
  lmqtt_userdata *mud = (lmqtt_userdata *)pesp_conn->reverse;
  if(mud == NULL)
    return;
  if(!mud->connected)
    return;
  // call mqtt_sent()
  // The write finished, so the send-timeout and keepalive counters restart.
  mud->event_timeout = 0;
  mud->keep_alive_tick = 0;

  if(mud->connState == MQTT_CONNECT_SENDING){
    mud->connState = MQTT_CONNECT_SENT;
    // MQTT_CONNECT not queued.
    return;
  }
  NODE_DBG("sent1, queue size: %d\n", msg_size(&(mud->mqtt_state.pending_msg_q)));
  // qos = 0, publish and forgot.
  msg_queue_t *node = msg_peek(&(mud->mqtt_state.pending_msg_q));
  if(node && node->msg_type == MQTT_MSG_TYPE_PUBLISH && node->publish_qos == 0) {
    // QoS 0: no PUBACK will come, so complete the publish right now.
    msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
    if(mud->cb_puback_ref == LUA_NOREF)
      return;
    if(mud->self_ref == LUA_NOREF)
      return;
    if(mud->L == NULL)
      return;
    lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->cb_puback_ref);
    lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->self_ref);
    // pass the userdata to callback func in lua
    lua_call(mud->L, 1, 0);
  }
  else if(node && node->msg_type == MQTT_MSG_TYPE_PUBACK && node->publish_qos == 1) {
    // Our PUBACK reply for an incoming QoS-1 message is out; done with it.
    msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
  }
  else if(node && node->msg_type == MQTT_MSG_TYPE_PUBCOMP) {
    // Final leg of the QoS-2 handshake; nothing more to wait for.
    msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
  }
  else if(node && node->msg_type == MQTT_MSG_TYPE_PINGREQ) {
    // Keepalive ping sent; the PINGRESP is handled (ignored) on receive.
    msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
  }
  NODE_DBG("sent2, queue size: %d\n", msg_size(&(mud->mqtt_state.pending_msg_q)));
  NODE_DBG("leave mqtt_socket_sent.\n");
}
/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*
 * alloc_msg(): return a msg structure, recycling one from the heap_msg   *
 * free list when available, otherwise zero-allocating a fresh one and    *
 * accounting for it in heap_mem. Returns NULL if calloc fails.           *
 *++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
struct msg *alloc_msg(void)
{
	struct msg *m;

	/* Reuse a recycled message if the free list has anything in it. */
	if (heap_msg.head != NULL || heap_msg.tail != NULL)
		return msg_dequeue(&heap_msg);

	m = (struct msg *)calloc(1, sizeof(struct msg));
	if (m != NULL)
		heap_mem += sizeof(struct msg);
	return m;
}
/*
 * k_request_msg_env(): hand out a free message envelope, blocking the
 * calling process until one becomes available.
 *
 * Bug fix: the original never re-ran msg_dequeue() after process_switch()
 * returned, so `temp` stayed NULL and the loop re-blocked the process
 * forever even once envelopes had been freed. We now retry the dequeue
 * each time the process is resumed.
 *
 * Returns a non-NULL envelope (never returns NULL).
 */
msg_envelope * k_request_msg_env() {
	msg_envelope *env = msg_dequeue(free_env_Q);
	while (env == NULL) {
		printf("k_request_msg_env: Process %i got blocked\n", current_process->process_id);
		fflush(stdout);
		current_process->process_state = BLOCKED_ON_RECEIVE;
		blocked_on_resource_Q_enqueue(current_process);
		process_switch();
		/* Resumed: an envelope may have been released — try again. */
		env = msg_dequeue(free_env_Q);
	}
	return env;
}
/*
 * tx_ui(): flush all queued UI (unnumbered information) frames.
 * Builds the UI header once, then prepends it to every message waiting
 * in l2->ui_queue and hands each one to enqueue_ui() for transmission.
 */
static void tx_ui(layer2_t *l2)
{
	u_char hdr[MAX_HEADER_LEN];
	msg_t *msg;
	int hlen;

	hlen = sethdraddr(l2, hdr, CMD);
	if (test_bit(FLG_LAPD_NET, &l2->flag))
		hdr[1] = 0xff; /* tei 127 */
	hdr[hlen++] = UI;

	for (;;) {
		msg = msg_dequeue(&l2->ui_queue);
		if (!msg)
			break;
		/* Strip the user head, splice in the UI header, restore head. */
		msg_pull(msg, mISDNUSER_HEAD_SIZE);
		memcpy(msg_push(msg, hlen), hdr, hlen);
		msg_push(msg, mISDNUSER_HEAD_SIZE);
		enqueue_ui(l2, msg);
	}
}
Msg_Env* k_receive_message(){ /*if(current_process ->inbox->head == NULL){ current_process->state = 3; //sets state to blocked on receive enqueue(blocked_on_receive, current_process); // adds to blocked on receive queue switch_process(); }*/ if(current_process ->inbox->head == NULL){ //this code is only for initial implemantation current_process->state = NO_BLK_RCV; //printf("\ninbox is empty\n"); return NULL; } Msg_Env *message_envelope = msg_dequeue(current_process->inbox); //Add SENDER_PID, RECEIVER_PID, CURRENT_TIME to message trace. return message_envelope; }
msg_envelope * k_receive_message() { printf("k_receive_message: Process %i calls receive\n", current_process->process_id); fflush(stdout); while ( current_process->msg_envelope_q.size == 0) { if (current_process->process_priority == IPROCESS) return NULL; else { printf("k_receive_message: Process %i got blocked\n", current_process->process_id); fflush(stdout); current_process->process_state = BLOCKED_ON_RECEIVE; process_switch(); printf("k_receive_message: Process %i resumes with msg q size: %i\n", current_process->process_id, current_process->msg_envelope_q.size); fflush(stdout); } } msg_queue *temp_queue; temp_queue = ¤t_process->msg_envelope_q; msg_envelope *temp_envelope = (msg_envelope *) msg_dequeue(temp_queue); //printf("k_receive_message: Sender id is %i",temp_envelope->sender_pid); //store the details of this receive transaction on the receive_trace_buffer /*if (send_tr_buf->index == 15) { //If the trace buffer is full int i; for (i = 0; i <receive_tr_buf->index; i++){ //Shift the stored data by 1 unit receive_tr_buf->receive_trace_buffer_array[i].sender_pid = receive_tr_buf->receive_trace_buffer_array[i+1].sender_pid; receive_tr_buf->receive_trace_buffer_array[i].receiver_pid = receive_tr_buf->receive_trace_buffer_array[i+1].receiver_pid; receive_tr_buf->receive_trace_buffer_array[i].time = receive_tr_buf->receive_trace_buffer_array[i+1].time; } receive_tr_buf->receive_trace_buffer_array[15].sender_pid = temp_envelope->sender_pid; receive_tr_buf->receive_trace_buffer_array[15].receiver_pid = temp_envelope->receiver_pid; receive_tr_buf->receive_trace_buffer_array[15].time = kernel_clock; } else { receive_tr_buf->index++; receive_tr_buf->receive_trace_buffer_array[receive_tr_buf->index].sender_pid = temp_envelope->sender_pid; receive_tr_buf->receive_trace_buffer_array[receive_tr_buf->index].receiver_pid = temp_envelope->receiver_pid; receive_tr_buf->receive_trace_buffer_array[receive_tr_buf->index].time = kernel_clock; }*/ return temp_envelope; }
int fill_fdset (struct msg_queue *queue, fd_set *set) { struct msg_queue tmp; struct msg *mptr; int max = 0; FD_ZERO(set); init_msg_queue(&tmp); while( (mptr = msg_dequeue(queue))!= NULL ) { FD_SET(mptr->type.io.io_socket, set); max = ( max > mptr->type.io.io_socket )? max : mptr->type.io.io_socket; msg_enqueue(&tmp, mptr); } msgmove(queue, &tmp); return max; }
/*
 * dataio(): I/O worker thread. Multiplexes all pending read/write
 * requests with select(), performs the socket I/O, and returns
 * MSG_IO_REPLY messages to the kernel-layer thread over the itc
 * channel. New MSG_IO_REQUEST messages arrive via itc_event.
 * Never returns normally; on fatal error it PANICs.
 */
void *dataio (void *arg)
{
	struct msg_queue wpenq;		/* pending writes */
	struct msg_queue rpenq;		/* pending reads */
	struct msg_queue to_krn;	/* replies bound for the kernel thread */
	struct msg_queue tmp;
	struct mb_queue no_sent;	/* buffers senddata() could not send */
	struct itc_event_info ieinfo;
	struct msg *mptr;
	ssize_t io_ret;
	int max, ret;
	fd_set wset, rset;
	char *where;			/* label of the failing call, for PANIC */

	arg = NULL;
	itc_block_signal();
	init_msg_queue(&wpenq);
	init_msg_queue(&rpenq);
	while (1) {
		FD_ZERO(&wset);
		FD_ZERO(&rset);
		/*
		 * Add itc file descriptor
		 */
		FD_SET(itc_event, &rset);
		max = itc_event;
		/*
		 * Add all sockets scaning read events
		 */
		ret = fill_fdset(&rpenq, &rset);
		max = ( max > ret ) ? max : ret;
		/*
		 * Add all sockets scaning write envents
		 */
		ret = fill_fdset(&wpenq, &wset);
		max = ( max > ret ) ? max : ret;
		ret = select_nosignal(max+1, &rset, &wset, NULL, NULL);
		if (ret == -1) {
			where = "select_nosignal()";
			goto panic;
		}
		init_msg_queue(&tmp);
		init_msg_queue(&to_krn);
		/* Writable sockets: try to flush each pending write. */
		while ((mptr = msg_dequeue(&wpenq)) != NULL) {
			if (FD_ISSET(mptr->type.io.io_socket, &wset)) {
				io_ret = senddata(mptr->type.io.io_socket, &mptr->mb.mbq, &no_sent, DISCARD_TRUE);
				if (io_ret > 0 && no_sent.size == 0) {
					/* Fully sent: build a success reply. */
					mptr->type.io.io_opt = IO_OPT_WRITE;
					mptr->type.io.io_ret = IO_RET_SUCCESS;
					mptr->type.io.io_errno = 0;
					mptr->type.io.io_rep_len += io_ret;
					mptr->msg_type = MSG_IO_REPLY;
					msg_enqueue(&to_krn, mptr);
				} else if ( io_ret == -1 ) {
					/* Hard error: report failure with errno. */
					mptr->type.io.io_opt = IO_OPT_WRITE;
					mptr->type.io.io_ret = IO_RET_FAILURE;
					mptr->type.io.io_errno = errno;
					mptr->msg_type = MSG_IO_REPLY;
					mbuffmove(&(mptr->mb.mbq), &no_sent);
					msg_enqueue(&to_krn, mptr);
				} else {
					/* Partial write: keep the remainder queued. */
					mptr->type.io.io_rep_len += io_ret;
					mbuffmove(&(mptr->mb.mbq), &no_sent);
					msg_enqueue(&tmp, mptr);
				}
			} else
				msg_enqueue(&tmp, mptr);
		}
		msgmove(&wpenq, &tmp);
		init_msg_queue(&tmp);
		/* Readable sockets: try to satisfy each pending read. */
		while ((mptr = msg_dequeue(&rpenq)) != NULL) {
			if (FD_ISSET(mptr->type.io.io_socket, &rset)) {
				io_ret = recvdata(mptr->type.io.io_socket, &(mptr->mb.mbq), mptr->type.io.io_req_len, mptr->type.io.io_chunk_size);
				if (io_ret > 0) {
					mptr->type.io.io_opt = IO_OPT_READ;
					mptr->type.io.io_ret = IO_RET_SUCCESS;
					mptr->type.io.io_errno = 0;
					mptr->type.io.io_rep_len = io_ret;
					mptr->msg_type = MSG_IO_REPLY;
					msg_enqueue(&to_krn, mptr);
				} else if ( io_ret == -1 ) {
					mptr->type.io.io_opt = IO_OPT_READ;
					mptr->type.io.io_ret = IO_RET_FAILURE;
					mptr->type.io.io_errno = errno;
					mptr->msg_type = MSG_IO_REPLY;
					msg_enqueue(&to_krn, mptr);
				} else {
					/* Nothing read yet: leave the request pending. */
					msg_enqueue(&tmp, mptr);
				}
			} else
				msg_enqueue(&tmp, mptr);
		}
		msgmove(&rpenq, &tmp);
		init_msg_queue(&tmp);
		/*
		 * Look for new request
		 */
		if (FD_ISSET(itc_event, &rset)) {
			ret = itc_read_event(itc_event, &ieinfo);
			if (ret == -1) {
				where = "itc_read_event()";
				goto panic;
			}
			if (ieinfo.src != KERNEL_LAYER_THREAD) {
				where = "[itc wrong src]";
				goto panic;
			}
			itc_readfrom(KERNEL_LAYER_THREAD, &tmp, 0);
			while( (mptr = msg_dequeue(&tmp)) != NULL) {
				if (mptr->msg_type != MSG_IO_REQUEST)
					/* Drop silently */
					continue;
				/* Fresh request: clear the reply fields and route it. */
				mptr->type.io.io_ret = 0;
				mptr->type.io.io_errno = 0;
				mptr->type.io.io_rep_len = 0;
				if (mptr->type.io.io_opt == IO_OPT_READ)
					msg_enqueue(&rpenq, mptr);
				if (mptr->type.io.io_opt == IO_OPT_WRITE)
					msg_enqueue(&wpenq, mptr);
			}
		}
		/*
		 * Send the reply to the kernel
		 */
		if (to_krn.size != 0)
			itc_writeto(KERNEL_LAYER_THREAD, &to_krn, 0);
	}
panic:
	PANIC(errno,"DATAIO_LAYER_THREAD",where);
	return NULL;
}
/*
 * Send a message.
 *
 * The current thread will be blocked until any other thread
 * receives and reply the message. A thread can send a
 * message to any object if it knows the object id.
 *
 * Returns 0 on success, or EFAULT/EINVAL/EDEADLK/EAGAIN/EINTR
 * depending on the failure (see the sleep-result switch below).
 */
int
msg_send(object_t obj, void *msg, size_t size)
{
	struct msg_header *hdr;
	thread_t t;
	void *kmsg;
	int rc;

	if (!user_area(msg))
		return EFAULT;
	if (size < sizeof(struct msg_header))
		return EINVAL;

	sched_lock();
	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	/*
	 * A thread can not send a message when it is
	 * already receiving from the target object.
	 * It will obviously cause a deadlock.
	 */
	if (obj == curthread->recvobj) {
		sched_unlock();
		return EDEADLK;
	}
	/*
	 * Translate message address to the kernel linear
	 * address. So that a receiver thread can access
	 * the message via kernel pointer. We can catch
	 * the page fault here.
	 */
	if ((kmsg = kmem_map(msg, size)) == NULL) {
		sched_unlock();
		return EFAULT;
	}
	curthread->msgaddr = kmsg;
	curthread->msgsize = size;
	/*
	 * The sender ID is filled in the message header
	 * by the kernel. So, the receiver can trust it.
	 */
	hdr = (struct msg_header *)kmsg;
	hdr->task = curtask;
	/*
	 * If receiver already exists, wake it up.
	 * The highest priority thread can get the message.
	 */
	if (!queue_empty(&obj->recvq)) {
		t = msg_dequeue(&obj->recvq);
		sched_unsleep(t, 0);
	}
	/*
	 * Sleep until we get a reply message.
	 * Note: Do not touch any data in the object
	 * structure after we wakeup. This is because the
	 * target object may be deleted while we are sleeping.
	 */
	curthread->sendobj = obj;
	msg_enqueue(&obj->sendq, curthread);
	rc = sched_sleep(&ipc_event);
	if (rc == SLP_INTR)
		queue_remove(&curthread->ipc_link);
	curthread->sendobj = NULL;
	sched_unlock();
	/*
	 * Check sleep result.
	 */
	switch (rc) {
	case SLP_BREAK:
		return EAGAIN;	/* Receiver has been terminated */
	case SLP_INVAL:
		return EINVAL;	/* Object has been deleted */
	case SLP_INTR:
		return EINTR;	/* Exception */
	default:
		/* DO NOTHING */
		break;
	}
	return 0;
}
/*
 * Receive a message.
 *
 * A thread can receive a message from the object which was
 * created by any thread belongs to same task. If the message
 * has not reached yet, it blocks until any message comes in.
 *
 * The size argument specifies the "maximum" size of the message
 * buffer to receive. If the sent message is larger than this
 * size, the kernel will automatically clip the message to this
 * maximum buffer size.
 *
 * When a message is received, the sender thread is removed from
 * object's send queue. So, another thread can receive the
 * subsequent message from that object. This is important for
 * the multi-thread server which must receive multiple messages
 * simultaneously.
 *
 * Returns 0 on success, or EFAULT/EINVAL/EACCES/EBUSY/EINTR.
 */
int
msg_receive(object_t obj, void *msg, size_t size)
{
	thread_t t;
	size_t len;
	int rc, error = 0;

	if (!user_area(msg))
		return EFAULT;

	sched_lock();
	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	if (obj->owner != curtask) {
		sched_unlock();
		return EACCES;
	}
	/*
	 * Check if this thread finished previous receive
	 * operation. A thread can not receive different
	 * messages at once.
	 */
	if (curthread->recvobj) {
		sched_unlock();
		return EBUSY;
	}
	curthread->recvobj = obj;
	/*
	 * If no message exists, wait until message arrives.
	 */
	while (queue_empty(&obj->sendq)) {
		/*
		 * Block until someone sends a message.
		 */
		msg_enqueue(&obj->recvq, curthread);
		rc = sched_sleep(&ipc_event);
		if (rc != 0) {
			/*
			 * Receive is failed due to some reasons.
			 */
			switch (rc) {
			case SLP_INVAL:
				error = EINVAL;	/* Object has been deleted */
				break;
			case SLP_INTR:
				queue_remove(&curthread->ipc_link);
				error = EINTR;	/* Got exception */
				break;
			default:
				panic("msg_receive");
				break;
			}
			curthread->recvobj = NULL;
			sched_unlock();
			return error;
		}
		/*
		 * Check the existence of the sender thread again.
		 * Even if this thread is woken by the sender thread,
		 * the message may be received by another thread.
		 * This may happen when another high priority thread
		 * becomes runnable before we receive the message.
		 */
	}

	t = msg_dequeue(&obj->sendq);
	/*
	 * Copy out the message to the user-space.
	 */
	len = MIN(size, t->msgsize);
	if (len > 0) {
		if (copyout(t->msgaddr, msg, len)) {
			/* Copy failed: put the sender back so the message is not lost. */
			msg_enqueue(&obj->sendq, t);
			curthread->recvobj = NULL;
			sched_unlock();
			return EFAULT;
		}
	}
	/*
	 * Detach the message from the target object.
	 */
	curthread->sender = t;
	t->receiver = curthread;
	sched_unlock();
	return error;
}
/*
 * Periodic MQTT timer tick. Drives three mechanisms:
 *  - the send-timeout countdown (event_timeout) for the in-flight packet,
 *  - (re)transmission of the head of the pending message queue,
 *  - the keepalive counter, sending PINGREQ when it exceeds the
 *    negotiated keepalive interval.
 */
void mqtt_socket_timer(void *arg)
{
  NODE_DBG("enter mqtt_socket_timer.\n");
  lmqtt_userdata *mud = (lmqtt_userdata*) arg;

  if(mud == NULL)
    return;
  if(mud->pesp_conn == NULL){
    // Connection object is gone; stop ticking.
    NODE_DBG("mud->pesp_conn is NULL.\n");
    os_timer_disarm(&mud->mqttTimer);
    return;
  }

  NODE_DBG("timer, queue size: %d\n", msg_size(&(mud->mqtt_state.pending_msg_q)));
  if(mud->event_timeout > 0){
    NODE_DBG("event_timeout: %d.\n", mud->event_timeout);
    mud->event_timeout --;
    if(mud->event_timeout > 0){
      return;   // still waiting on the in-flight send; nothing else to do
    } else {
      NODE_DBG("event timeout. \n");
      if(mud->connState == MQTT_DATA)
        msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
      // should remove the head of the queue and re-send with DUP = 1
      // Not implemented yet.
    }
  }

  if(mud->connState == MQTT_INIT){ // socket connect time out.
    NODE_DBG("Can not connect to broker.\n");
    // Never goes here.
  } else if(mud->connState == MQTT_CONNECT_SENDING){ // MQTT_CONNECT send time out.
    NODE_DBG("sSend MQTT_CONNECT failed.\n");
    mud->connState = MQTT_INIT;
    if(mud->secure)
      espconn_secure_disconnect(mud->pesp_conn);
    else
      espconn_disconnect(mud->pesp_conn);
    mud->keep_alive_tick = 0; // not need count anymore
  } else if(mud->connState == MQTT_CONNECT_SENT) { // wait for CONACK time out.
    NODE_DBG("MQTT_CONNECT failed.\n");
  } else if(mud->connState == MQTT_DATA){
    msg_queue_t *pending_msg = msg_peek(&(mud->mqtt_state.pending_msg_q));
    if(pending_msg){
      // (Re)send the queue head and restart the send-timeout countdown.
      mud->event_timeout = MQTT_SEND_TIMEOUT;
      if(mud->secure)
        espconn_secure_sent(mud->pesp_conn, pending_msg->msg.data, pending_msg->msg.length);
      else
        espconn_sent(mud->pesp_conn, pending_msg->msg.data, pending_msg->msg.length);
      mud->keep_alive_tick = 0;
      NODE_DBG("id: %d - qos: %d, length: %d\n", pending_msg->msg_id, pending_msg->publish_qos, pending_msg->msg.length);
    } else { // no queued event.
      mud->keep_alive_tick ++;
      if(mud->keep_alive_tick > mud->mqtt_state.connect_info->keepalive){
        // Keepalive expired: queue and immediately send a PINGREQ.
        mud->event_timeout = MQTT_SEND_TIMEOUT;
        uint8_t temp_buffer[MQTT_BUF_SIZE];
        mqtt_msg_init(&mud->mqtt_state.mqtt_connection, temp_buffer, MQTT_BUF_SIZE);
        NODE_DBG("\r\nMQTT: Send keepalive packet\r\n");
        mqtt_message_t* temp_msg = mqtt_msg_pingreq(&mud->mqtt_state.mqtt_connection);
        msg_queue_t *node = msg_enqueue( &(mud->mqtt_state.pending_msg_q), temp_msg, 0, MQTT_MSG_TYPE_PINGREQ, (int)mqtt_get_qos(temp_msg->data) );
        // only one message in queue, send immediately.
        if(mud->secure)
          espconn_secure_sent(mud->pesp_conn, temp_msg->data, temp_msg->length);
        else
          espconn_sent(mud->pesp_conn, temp_msg->data, temp_msg->length);
        mud->keep_alive_tick = 0;
      }
    }
  }
  NODE_DBG("leave mqtt_socket_timer.\n");
}
/*
 * espconn "receive" callback: parse one or more MQTT packets from pdata.
 * Handles CONNACK during connection setup, and in MQTT_DATA state
 * dispatches on the packet type: SUBACK/UNSUBACK, the full QoS-1/QoS-2
 * PUBLISH handshakes (PUBACK/PUBREC/PUBREL/PUBCOMP), and PINGREQ/PINGRESP.
 * Responses are queued on pending_msg_q; if the queue was empty the new
 * head is transmitted immediately at the bottom. Multiple packets in one
 * TCP segment are handled via the READPACKET loop.
 */
static void mqtt_socket_received(void *arg, char *pdata, unsigned short len)
{
  NODE_DBG("enter mqtt_socket_received.\n");

  uint8_t msg_type;
  uint8_t msg_qos;
  uint16_t msg_id;
  msg_queue_t *node = NULL;
  int length = (int)len;
  // uint8_t in_buffer[MQTT_BUF_SIZE];
  uint8_t *in_buffer = (uint8_t *)pdata;

  struct espconn *pesp_conn = arg;
  if(pesp_conn == NULL)
    return;
  lmqtt_userdata *mud = (lmqtt_userdata *)pesp_conn->reverse;
  if(mud == NULL)
    return;

READPACKET:
  if(length > MQTT_BUF_SIZE || length <= 0)
    return;
  // c_memcpy(in_buffer, pdata, length);

  // Scratch connection buffer used to build any outgoing response packet.
  uint8_t temp_buffer[MQTT_BUF_SIZE];
  mqtt_msg_init(&mud->mqtt_state.mqtt_connection, temp_buffer, MQTT_BUF_SIZE);
  mqtt_message_t *temp_msg = NULL;

  switch(mud->connState){
    case MQTT_CONNECT_SENDING:
    case MQTT_CONNECT_SENT:
      // Only a CONNACK is acceptable before the session is established.
      if(mqtt_get_type(in_buffer) != MQTT_MSG_TYPE_CONNACK){
        NODE_DBG("MQTT: Invalid packet\r\n");
        mud->connState = MQTT_INIT;
        if(mud->secure)
          espconn_secure_disconnect(pesp_conn);
        else
          espconn_disconnect(pesp_conn);
      } else {
        mud->connState = MQTT_DATA;
        NODE_DBG("MQTT: Connected\r\n");
        if(mud->cb_connect_ref == LUA_NOREF)
          break;
        if(mud->self_ref == LUA_NOREF)
          break;
        if(mud->L == NULL)
          break;
        lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->cb_connect_ref);
        lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->self_ref);
        // pass the userdata(client) to callback func in lua
        lua_call(mud->L, 1, 0);
        break;
      }
      break;

    case MQTT_DATA:
      mud->mqtt_state.message_length_read = length;
      mud->mqtt_state.message_length = mqtt_get_total_length(in_buffer, mud->mqtt_state.message_length_read);
      msg_type = mqtt_get_type(in_buffer);
      msg_qos = mqtt_get_qos(in_buffer);
      msg_id = mqtt_get_id(in_buffer, mud->mqtt_state.message_length);

      // Head of the pending queue is the packet whose ACK we may be reading.
      msg_queue_t *pending_msg = msg_peek(&(mud->mqtt_state.pending_msg_q));
      NODE_DBG("MQTT_DATA: type: %d, qos: %d, msg_id: %d, pending_id: %d\r\n", msg_type, msg_qos, msg_id, (pending_msg)?pending_msg->msg_id:0);

      switch(msg_type) {
        case MQTT_MSG_TYPE_SUBACK:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_SUBSCRIBE && pending_msg->msg_id == msg_id){
            NODE_DBG("MQTT: Subscribe successful\r\n");
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
            if (mud->cb_suback_ref == LUA_NOREF)
              break;
            if (mud->self_ref == LUA_NOREF)
              break;
            if(mud->L == NULL)
              break;
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->cb_suback_ref);
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->self_ref);
            lua_call(mud->L, 1, 0);
          }
          break;
        case MQTT_MSG_TYPE_UNSUBACK:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_UNSUBSCRIBE && pending_msg->msg_id == msg_id){
            NODE_DBG("MQTT: UnSubscribe successful\r\n");
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
          }
          break;
        case MQTT_MSG_TYPE_PUBLISH:
          // Incoming publish: acknowledge per its QoS, then deliver to Lua.
          if(msg_qos == 1){
            temp_msg = mqtt_msg_puback(&mud->mqtt_state.mqtt_connection, msg_id);
            node = msg_enqueue(&(mud->mqtt_state.pending_msg_q), temp_msg, msg_id, MQTT_MSG_TYPE_PUBACK, (int)mqtt_get_qos(temp_msg->data) );
          }
          else if(msg_qos == 2){
            temp_msg = mqtt_msg_pubrec(&mud->mqtt_state.mqtt_connection, msg_id);
            node = msg_enqueue(&(mud->mqtt_state.pending_msg_q), temp_msg, msg_id, MQTT_MSG_TYPE_PUBREC, (int)mqtt_get_qos(temp_msg->data) );
          }
          if(msg_qos == 1 || msg_qos == 2){
            NODE_DBG("MQTT: Queue response QoS: %d\r\n", msg_qos);
          }
          deliver_publish(mud, in_buffer, mud->mqtt_state.message_length);
          break;
        case MQTT_MSG_TYPE_PUBACK:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_PUBLISH && pending_msg->msg_id == msg_id){
            NODE_DBG("MQTT: Publish with QoS = 1 successful\r\n");
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
            if(mud->cb_puback_ref == LUA_NOREF)
              break;
            if(mud->self_ref == LUA_NOREF)
              break;
            if(mud->L == NULL)
              break;
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->cb_puback_ref);
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->self_ref);
            // pass the userdata to callback func in lua
            lua_call(mud->L, 1, 0);
          }
          break;
        case MQTT_MSG_TYPE_PUBREC:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_PUBLISH && pending_msg->msg_id == msg_id){
            NODE_DBG("MQTT: Publish with QoS = 2 Received PUBREC\r\n");
            // Note: actrually, should not destroy the msg until PUBCOMP is received.
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
            temp_msg = mqtt_msg_pubrel(&mud->mqtt_state.mqtt_connection, msg_id);
            node = msg_enqueue(&(mud->mqtt_state.pending_msg_q), temp_msg, msg_id, MQTT_MSG_TYPE_PUBREL, (int)mqtt_get_qos(temp_msg->data) );
            NODE_DBG("MQTT: Response PUBREL\r\n");
          }
          break;
        case MQTT_MSG_TYPE_PUBREL:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_PUBREC && pending_msg->msg_id == msg_id){
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
            temp_msg = mqtt_msg_pubcomp(&mud->mqtt_state.mqtt_connection, msg_id);
            node = msg_enqueue(&(mud->mqtt_state.pending_msg_q), temp_msg, msg_id, MQTT_MSG_TYPE_PUBCOMP, (int)mqtt_get_qos(temp_msg->data) );
            NODE_DBG("MQTT: Response PUBCOMP\r\n");
          }
          break;
        case MQTT_MSG_TYPE_PUBCOMP:
          if(pending_msg && pending_msg->msg_type == MQTT_MSG_TYPE_PUBREL && pending_msg->msg_id == msg_id){
            NODE_DBG("MQTT: Publish with QoS = 2 successful\r\n");
            msg_destroy(msg_dequeue(&(mud->mqtt_state.pending_msg_q)));
            if(mud->cb_puback_ref == LUA_NOREF)
              break;
            if(mud->self_ref == LUA_NOREF)
              break;
            if(mud->L == NULL)
              break;
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->cb_puback_ref);
            lua_rawgeti(mud->L, LUA_REGISTRYINDEX, mud->self_ref);
            // pass the userdata to callback func in lua
            lua_call(mud->L, 1, 0);
          }
          break;
        case MQTT_MSG_TYPE_PINGREQ:
          temp_msg = mqtt_msg_pingresp(&mud->mqtt_state.mqtt_connection);
          node = msg_enqueue(&(mud->mqtt_state.pending_msg_q), temp_msg, msg_id, MQTT_MSG_TYPE_PINGRESP, (int)mqtt_get_qos(temp_msg->data) );
          NODE_DBG("MQTT: Response PINGRESP\r\n");
          break;
        case MQTT_MSG_TYPE_PINGRESP:
          // Ignore
          NODE_DBG("MQTT: PINGRESP received\r\n");
          break;
      }
      // NOTE: this is done down here and not in the switch case above
      // because the PSOCK_READBUF_LEN() won't work inside a switch
      // statement due to the way protothreads resume.
      if(msg_type == MQTT_MSG_TYPE_PUBLISH)
      {
        length = mud->mqtt_state.message_length_read;
        if(mud->mqtt_state.message_length < mud->mqtt_state.message_length_read)
        {
          // More than one MQTT packet in this TCP segment: advance and re-parse.
          length -= mud->mqtt_state.message_length;
          in_buffer += mud->mqtt_state.message_length;
          NODE_DBG("Get another published message\r\n");
          goto READPACKET;
        }
      }
      break;
  }

  // If the response we just queued is the only pending message and no send
  // is in flight, transmit it immediately rather than waiting for the timer.
  if(node && (1==msg_size(&(mud->mqtt_state.pending_msg_q))) && mud->event_timeout == 0){
    mud->event_timeout = MQTT_SEND_TIMEOUT;
    NODE_DBG("Sent: %d\n", node->msg.length);
    if( mud->secure )
      espconn_secure_sent( pesp_conn, node->msg.data, node->msg.length );
    else
      espconn_sent( pesp_conn, node->msg.data, node->msg.length );
  }
  mud->keep_alive_tick = 0;
  NODE_DBG("receive, queue size: %d\n", msg_size(&(mud->mqtt_state.pending_msg_q)));
  NODE_DBG("leave mqtt_socket_received.\n");
  return;
}
/*
 * Send a message.
 *
 * The current thread will be blocked until any other thread
 * receives the message and calls msg_reply() for the target
 * object. When new message has been reached to the object, it
 * will be received by highest priority thread waiting for
 * that message. A thread can send a message to any object if
 * it knows the object id.
 *
 * Returns 0 on success, or EFAULT/EINVAL/EPERM/EDEADLK/
 * EAGAIN/EINTR/ETIMEDOUT (see the sleep-result switch below).
 */
int
msg_send(object_t obj, void *msg, size_t size, u_long timeout)
{
	struct msg_header *hdr;
	thread_t th;
	void *kmsg;
	int rc;

	if (!user_area(msg))
		return EFAULT;
	if (size < sizeof(struct msg_header))
		return EINVAL;

	sched_lock();
	if (!object_valid(obj)) {
		sched_unlock();
		return EINVAL;
	}
	/* Only the object's owner task may send, unless the caller holds CAP_IPC. */
	if (obj->owner != cur_task() && !task_capable(CAP_IPC)) {
		sched_unlock();
		return EPERM;
	}
	/*
	 * A thread can not send a message when the
	 * thread is already receiving from the target
	 * object. This will obviously cause a deadlock.
	 */
	if (obj == cur_thread->recvobj) {
		sched_unlock();
		return EDEADLK;
	}
	/*
	 * Translate message address to the kernel linear
	 * address. So that a receiver thread can access
	 * the message via kernel pointer. We can catch
	 * the page fault here.
	 */
	if ((kmsg = kmem_map(msg, size)) == NULL) {
		/* Error - no physical address for the message */
		sched_unlock();
		return EFAULT;
	}
	/*
	 * The sender ID in the message header is filled
	 * by the kernel. So, the receiver can trust it.
	 */
	hdr = (struct msg_header *)kmsg;
	hdr->task = cur_task();

	/* Save information about the message block. */
	cur_thread->msgaddr = kmsg;
	cur_thread->msgsize = size;

	/*
	 * If receiver already exists, wake it up.
	 * Highest priority thread will get this message.
	 */
	if (!queue_empty(&obj->recvq)) {
		th = msg_dequeue(&obj->recvq);
		sched_unsleep(th, 0);
	}
	/*
	 * Sleep until we get a reply message.
	 * Note: Do not touch any data in the object
	 * structure after we wakeup. This is because the
	 * target object may be deleted during we were
	 * sleeping.
	 */
	cur_thread->sendobj = obj;
	msg_enqueue(&obj->sendq, cur_thread);
	rc = sched_tsleep(&ipc_event, timeout);
	if (rc == SLP_INTR)
		queue_remove(&cur_thread->ipc_link);
	cur_thread->sendobj = NULL;
	sched_unlock();
	/*
	 * Check sleep result.
	 */
	switch (rc) {
	case SLP_BREAK:
		return EAGAIN;	/* Receiver has been terminated */
	case SLP_INVAL:
		return EINVAL;	/* Object has been deleted */
	case SLP_INTR:
		return EINTR;	/* Exception */
	case SLP_TIMEOUT:
		return ETIMEDOUT;	/* Timeout */
	default:
		/* DO NOTHING */
		break;
	}
	return 0;
}
/*
 * Receive a message.
 *
 * A thread can receive a message from the object which was
 * created by any thread belongs to same task. If the message
 * has not arrived yet, it blocks until any message comes in.
 *
 * The size argument specifies the "maximum" size of the message
 * buffer to receive. If the sent message is larger than this
 * size, the kernel will automatically clip the message to the
 * receive buffer size.
 *
 * When message is received, the sender thread is removed from
 * object's send queue. So, another thread can receive the
 * subsequent message from that object. This is important for
 * the multi-thread server which receives some messages
 * simultaneously.
 *
 * Returns 0 on success, or EFAULT/EINVAL/EACCES/EBUSY/
 * EINTR/ETIMEDOUT.
 */
int
msg_receive(object_t obj, void *msg, size_t size, u_long timeout)
{
	thread_t th;
	size_t len;
	int rc, err = 0;

	if (!user_area(msg))
		return EFAULT;

	sched_lock();
	if (!object_valid(obj)) {
		err = EINVAL;
		goto out;
	}
	if (obj->owner != cur_task()) {
		err = EACCES;
		goto out;
	}
	/*
	 * Check if this thread finished previous receive
	 * operation. A thread can not receive different
	 * messages at once.
	 */
	if (cur_thread->recvobj) {
		err = EBUSY;
		goto out;
	}
	cur_thread->recvobj = obj;

	/*
	 * If no message exists, wait until message arrives.
	 */
	while (queue_empty(&obj->sendq)) {
		/*
		 * Block until someone sends the message.
		 */
		msg_enqueue(&obj->recvq, cur_thread);
		rc = sched_tsleep(&ipc_event, timeout);
		if (rc != 0) {
			/*
			 * Receive is failed due to some reasons.
			 */
			switch (rc) {
			case SLP_INVAL:
				err = EINVAL;	/* Object has been deleted */
				break;
			case SLP_INTR:
				queue_remove(&cur_thread->ipc_link);
				err = EINTR;	/* Got exception */
				break;
			case SLP_TIMEOUT:
				err = ETIMEDOUT;	/* Timeout */
				break;
			default:
				panic("msg_receive");
				break;
			}
			cur_thread->recvobj = NULL;
			goto out;
		}
		/*
		 * Even if this thread is woken by the sender thread,
		 * the message may be received by another thread
		 * before this thread runs. This can occur when
		 * higher priority thread becomes runnable at that
		 * time. So, it is necessary to check the existence
		 * of the sender, again.
		 */
	}

	th = msg_dequeue(&obj->sendq);
	/*
	 * Copy out the message to the user-space.
	 * The smaller buffer size is used as copy length
	 * between sender and receiver thread.
	 */
	len = min(size, th->msgsize);
	if (len > 0) {
		if (umem_copyout(th->msgaddr, msg, len)) {
			/* Copy failed: requeue the sender so the message is not lost. */
			msg_enqueue(&obj->sendq, th);
			cur_thread->recvobj = NULL;
			err = EFAULT;
			goto out;
		}
	}
	/*
	 * Detach the message from the target object.
	 */
	cur_thread->sender = th;
	th->receiver = cur_thread;
 out:
	sched_unlock();
	return err;
}