static bool req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id, conn->sd); req_put(msg); return true; } /* * Handle "quit\r\n", which is the protocol way of doing a * passive close */ if (msg->quit) { ASSERT(conn->rmsg == NULL); log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id, conn->sd); conn->eof = 1; conn->recv_ready = 0; req_put(msg); return true; } return false; }
static bool dnode_rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; ASSERT(!conn->dnode_client && !conn->dnode_server); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "dyn: filter empty rsp %"PRIu64" on s %d", msg->id, conn->sd); dnode_rsp_put(msg); return true; } pmsg = TAILQ_FIRST(&conn->omsg_q); if (pmsg == NULL) { log_debug(LOG_INFO, "dyn: filter stray rsp %"PRIu64" len %"PRIu32" on s %d noreply %d", msg->id, msg->mlen, conn->sd, msg->noreply); dnode_rsp_put(msg); return true; } ASSERT(pmsg->peer == NULL); ASSERT(pmsg->request && !pmsg->done); return false; }
static bool dnode_req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { ASSERT(conn->type == CONN_DNODE_PEER_CLIENT); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); if (log_loggable(LOG_VERB)) { log_debug(LOG_VERB, "dyn: filter empty req %"PRIu64" from c %d", msg->id, conn->sd); } req_put(msg); return true; } /* dynomite handler */ if (msg->dmsg != NULL) { if (dmsg_process(ctx, conn, msg->dmsg)) { req_put(msg); return true; } } return false; }
/*
 * Pop the next envelope from process pid's message queue without
 * blocking; returns NULL when the queue is empty.
 */
ENVELOPE* k_non_blocking_receive_message(int pid)
{
    ENV_QUEUE *queue = &(gp_pcbs[pid]->env_q);

    return msg_empty(queue) ? NULL : dequeue_env_queue(queue);
}
static bool req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct server_pool *pool = conn->owner; ASSERT(conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id, conn->sd); req_put(msg); return true; } /* * Handle "quit\r\n", which is the protocol way of doing a * passive close */ if (msg->quit) { ASSERT(conn->rmsg == NULL); log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id, conn->sd); conn->eof = 1; conn->recv_ready = 0; req_put(msg); return true; } /* * Handle "PING\r\n", which should test that all/one (depending on auto_eject_hosts and * failover pool) of the servers are actually alive. Until that day it's better to give * the client an incomplete answer than to kill them. */ if (msg->type == MSG_REQ_REDIS_PING) { log_debug(LOG_INFO, "filter ping req %"PRIu64" from c %d", msg->id, conn->sd); direct_reply(ctx, conn, msg, "+PONG\r\n"); return true; } /* Handle excessively large request payloads */ if (msg->vlen > pool->item_size_max) { ASSERT(conn->rmsg == NULL); log_debug(LOG_ERR, "filter size %"PRIu32" > max %"PRIu32" req %"PRIu64" from c %d", msg->vlen, pool->item_size_max, msg->id, conn->sd); conn->done = 1; req_put(msg); return true; } else { log_debug(LOG_DEBUG, "filter size %"PRIu32" req %"PRIu64" from c %d", msg->vlen, msg->id, conn->sd); } return false; }
/*
 * Remove and return the envelope at the head of q; NULL when empty.
 * The returned envelope is unlinked (nextMsg cleared).
 */
ENVELOPE* dequeue_env_queue(ENV_QUEUE *q)
{
    ENVELOPE *env;

    if (msg_empty(q)) {
        return NULL;
    }

    env = q->head;
    if (q->head == q->tail) {
        /* removing the only element leaves the queue empty */
        q->head = NULL;
        q->tail = NULL;
    } else {
        q->head = env->nextMsg;
    }

    env->nextMsg = NULL;
    return env;
}
static bool rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; ASSERT(!conn->client && !conn->proxy); if(conn->is_Select_Msg){ conn->is_Select_Msg = 0; rsp_put(msg); log_debug(LOG_VERB," select success rsp %"PRIu64" len %"PRIu32" on s %d ", msg->id, msg->mlen, conn->sd); //ignore first response return true; } if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id, conn->sd); rsp_put(msg); return true; } pmsg = TAILQ_FIRST(&conn->omsg_q); if (pmsg == NULL) { log_error("filter stray rsp %"PRIu64" len %"PRIu32" on s %d", msg->id, msg->mlen, conn->sd); rsp_put(msg); errno = EINVAL; conn->err = errno; return true; } ASSERT(pmsg->peer == NULL); ASSERT(pmsg->request && !pmsg->done); if (pmsg->swallow) { conn->dequeue_outq(ctx, conn, pmsg); pmsg->done = 1; log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req " "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id, conn->sd); rsp_put(msg); req_put(pmsg); return true; } return false; }
static bool rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; ASSERT(!conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id, conn->sd); rsp_put(msg); return true; } pmsg = TAILQ_FIRST(&conn->omsg_q); if (pmsg == NULL) { log_debug(LOG_VERB, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d", msg->id, msg->mlen, conn->sd); rsp_put(msg); return true; } if (pmsg->noreply) { conn->dequeue_outq(ctx, conn, pmsg); rsp_put(pmsg); rsp_put(msg); return true; } ASSERT(pmsg->peer == NULL); ASSERT(pmsg->request && !pmsg->done); if (pmsg->swallow) { conn->dequeue_outq(ctx, conn, pmsg); pmsg->done = 1; if (log_loggable(LOG_DEBUG)) { log_debug(LOG_DEBUG, "swallow rsp %"PRIu64" len %"PRIu32" of req " "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id, conn->sd); } rsp_put(msg); req_put(pmsg); return true; } return false; }
static bool rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; ASSERT(!conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id, conn->sd); rsp_put(msg); return true; } pmsg = TAILQ_FIRST(&conn->omsg_q); if (pmsg == NULL) { log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d", msg->id, msg->mlen, conn->sd); rsp_put(msg); return true; } ASSERT(pmsg->peer == NULL); ASSERT(pmsg->request && !pmsg->done); /* establish msg <-> pmsg (response <-> request) link */ msg->peer = pmsg; pmsg->peer = msg; if (pmsg->swallow) { if (pmsg->pre_swallow != NULL) { pmsg->pre_swallow(ctx, conn, msg); } conn->dequeue_outq(ctx, conn, pmsg); pmsg->done = 1; log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req " "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id, conn->sd); req_put(pmsg); return true; } return false; }
static bool req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { ASSERT(conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id, conn->sd); req_put(msg); return true; } /* * Handle "quit\r\n" (memcache) or "*1\r\n$4\r\nquit\r\n" (redis), which * is the protocol way of doing a passive close. The connection is closed * as soon as all pending replies have been written to the client. */ if (msg->quit) { log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id, conn->sd); if (conn->rmsg != NULL) { log_debug(LOG_INFO, "discard invalid req %"PRIu64" len %"PRIu32" " "from c %d sent after quit req", conn->rmsg->id, conn->rmsg->mlen, conn->sd); } conn->eof = 1; conn->recv_ready = 0; req_put(msg); return true; } /* * If this conn is not authenticated, we will mark it as noforward, * and handle it in the redis_reply handler. */ if (conn->need_auth) { msg->noforward = 1; } return false; }
static bool req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { ASSERT(conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty req %"PRIu64" from c %d", msg->id, conn->sd); req_put(msg); return true; } /* * Handle "quit\r\n", which is the protocol way of doing a * passive close */ if (msg->quit) { ASSERT(conn->rmsg == NULL); log_debug(LOG_INFO, "filter quit req %"PRIu64" from c %d", msg->id, conn->sd); conn->eof = 1; conn->recv_ready = 0; req_put(msg); return true; } /* * if this conn is not authenticated, we will mark it as noforward, * and handle it in the redis_reply handler. * */ if (conn->need_auth) { msg->noforward = 1; } return false; }
static bool dnode_req_filter(struct context *ctx, struct conn *conn, struct msg *msg) { ASSERT(conn->dnode_client && !conn->dnode_server); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "dyn: filter empty req %"PRIu64" from c %d", msg->id, conn->sd); dnode_req_put(msg); return true; } /* dynomite hanlder */ if (msg->dmsg != NULL) { if (dmsg_process(ctx, conn, msg->dmsg)) { dnode_req_put(msg); return true; } } return false; }
static bool rsp_filter(struct context *ctx, struct conn *conn, struct msg *msg) { struct msg *pmsg; ASSERT(!conn->client && !conn->proxy); if (msg_empty(msg)) { ASSERT(conn->rmsg == NULL); log_debug(LOG_VERB, "filter empty rsp %"PRIu64" on s %d", msg->id, conn->sd); rsp_put(msg); return true; } pmsg = TAILQ_FIRST(&conn->omsg_q); if (pmsg == NULL) { log_debug(LOG_ERR, "filter stray rsp %"PRIu64" len %"PRIu32" on s %d", msg->id, msg->mlen, conn->sd); rsp_put(msg); /* * Memcached server can respond with an error response before it has * received the entire request. This is most commonly seen for set * requests that exceed item_size_max. IMO, this behavior of memcached * is incorrect. The right behavior for update requests that are over * item_size_max would be to either: * - close the connection Or, * - read the entire item_size_max data and then send CLIENT_ERROR * * We handle this stray packet scenario in nutcracker by closing the * server connection which would end up sending SERVER_ERROR to all * clients that have requests pending on this server connection. The * fix is aggressive, but not doing so would lead to clients getting * out of sync with the server and as a result clients end up getting * responses that don't correspond to the right request. * * See: https://github.com/twitter/twemproxy/issues/149 */ conn->err = EINVAL; conn->done = 1; return true; } ASSERT(pmsg->peer == NULL); ASSERT(pmsg->request && !pmsg->done); if (pmsg->swallow) { conn->swallow_msg(conn, pmsg, msg); conn->dequeue_outq(ctx, conn, pmsg); pmsg->done = 1; log_debug(LOG_INFO, "swallow rsp %"PRIu64" len %"PRIu32" of req " "%"PRIu64" on s %d", msg->id, msg->mlen, pmsg->id, conn->sd); rsp_put(msg); req_put(pmsg); return true; } return false; }
/*
 * Build the reply body for the admin "slowlog" command into pmsg.
 *
 * Subcommands (first key token of msg):
 *   reset        - clear all recorded entries, reply "OK"
 *   id           - reply with the current entry-id counter
 *   len          - reply with the number of stored entries
 *   get [count]  - dump up to count entries (default 10)
 *   overview [n] - per-day period statistics for up to n days (default 10)
 *
 * Returns NC_OK on success; on append failure sets conn->err = ENOMEM and
 * returns the error status. A malformed command yields an "ERR: ..." reply.
 * Shared slowlog state is read under rwlocker (rwlock) and the overview
 * statistics under statistics_locker (mutex); both are released on every
 * early-return path.
 */
rstatus_t
slowlog_command_make_reply(struct context *ctx, struct conn *conn,
                           struct msg *msg, struct msg *pmsg)
{
    rstatus_t status;
    uint32_t nkeys;        /* number of parsed tokens in the request */
    struct keypos *kp;     /* current token (start/end pointers) */
    char *contents;        /* error text for malformed commands */
    uint32_t subcmdlen;    /* length of the subcommand token */

    ASSERT(conn->client && !conn->proxy);
    ASSERT(msg->request);
    ASSERT(pmsg != NULL && !pmsg->request);
    ASSERT(msg->owner == conn);
    ASSERT(conn->owner == ctx->manager);

    nkeys = array_n(msg->keys);
    ASSERT(nkeys >= 1);

    /* first token selects the subcommand */
    kp = array_get(msg->keys, 0);
    subcmdlen = (uint32_t)(kp->end-kp->start);
    if (subcmdlen==strlen("reset")&&!memcmp(kp->start,"reset",subcmdlen)){
        /* "slowlog reset": drop every entry and acknowledge with "OK" */
        if (nkeys != 1) {
            goto format_error;
        }
        slowlog_reset();
        status = msg_append_full(pmsg, (uint8_t*)"OK", 2);
        if (status != NC_OK) {
            conn->err = ENOMEM;
            return status;
        }
        goto done;
    } else if (subcmdlen==strlen("id")&&!memcmp(kp->start,"id",subcmdlen)){
        /* "slowlog id": report the shared entry-id counter */
        int buf_len;
        uint8_t buf[30];
        long long id;

        if (nkeys != 1) {
            goto format_error;
        }
        /* snapshot the counter under the reader lock */
        pthread_rwlock_rdlock(&rwlocker);
        id = slowlog_entry_id;
        pthread_rwlock_unlock(&rwlocker);
        buf_len = nc_scnprintf(buf,30,"%lld",id);
        status = msg_append_full(pmsg, buf, (size_t)buf_len);
        if (status != NC_OK) {
            conn->err = ENOMEM;
            return status;
        }
        goto done;
    } else if (subcmdlen==strlen("len")&&!memcmp(kp->start,"len",subcmdlen)){
        /* "slowlog len": report the number of stored entries */
        int len, buf_len;
        uint8_t buf[20];

        if (nkeys != 1) {
            goto format_error;
        }
        pthread_rwlock_rdlock(&rwlocker);
        len = slowlog_len;
        pthread_rwlock_unlock(&rwlocker);
        buf_len = nc_scnprintf(buf,20,"%d",len);
        status = msg_append_full(pmsg, buf, (size_t)buf_len);
        if (status != NC_OK) {
            conn->err = ENOMEM;
            return status;
        }
        goto done;
    } else if (subcmdlen==strlen("get")&&!memcmp(kp->start,"get",subcmdlen)){
        /* "slowlog get [count]": dump up to count entries, newest first in
         * list order; each entry is formatted as a numbered multi-line
         * record (id, time, duration, command name, key count, keys). */
        int count, sent = 0, buf_len;
        uint8_t buf[50];
        slowlog_entry *se;
        struct string *str;

        if (nkeys == 1) {
            count = 10;    /* default entry count */
        } else if (nkeys == 2) {
            kp = array_get(msg->keys, 1);
            count = nc_atoi(kp->start, (kp->end-kp->start));
            if (count < 0) {
                goto format_error;    /* non-numeric or negative count */
            }
        } else {
            goto format_error;
        }

        /* hold the reader lock for the whole traversal; every error path
         * below must unlock before returning */
        pthread_rwlock_rdlock(&rwlocker);
        se = STAILQ_FIRST(&slowlog);
        while(count-- && se != NULL) {
            int nfield;
            uint32_t j;

            sent++;
            /* field 1: entry id */
            buf_len = nc_scnprintf(buf,50,"%d) 1) %lld\r\n",sent, se->id);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_rwlock_unlock(&rwlocker);
                conn->err = ENOMEM;
                return status;
            }
            /* field 2: timestamp */
            buf_len = nc_scnprintf(buf,50," 2) %lld\r\n",se->time);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_rwlock_unlock(&rwlocker);
                conn->err = ENOMEM;
                return status;
            }
            /* field 3: duration */
            buf_len = nc_scnprintf(buf,50," 3) %lld\r\n",se->duration);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_rwlock_unlock(&rwlocker);
                conn->err = ENOMEM;
                return status;
            }
            /* field 4: command name, key count, then each key as its own
             * numbered sub-field */
            str = msg_type_string(se->cmdtype);
            nfield = 1;
            buf_len = nc_scnprintf(buf,50," 4) %d) %s\r\n",nfield++,str->data);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_rwlock_unlock(&rwlocker);
                conn->err = ENOMEM;
                return status;
            }
            buf_len = nc_scnprintf(buf,50," %d) %d\r\n",nfield++,se->keys_count);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_rwlock_unlock(&rwlocker);
                conn->err = ENOMEM;
                return status;
            }
            if (se->keys != NULL) {
                for (j = 0; j < array_n(se->keys); j ++) {
                    str = array_get(se->keys, j);
                    buf_len = nc_scnprintf(buf,50," %d) ",nfield++);
                    status = msg_append_full(pmsg, buf, (size_t)buf_len);
                    if (status != NC_OK) {
                        pthread_rwlock_unlock(&rwlocker);
                        conn->err = ENOMEM;
                        return status;
                    }
                    status = msg_append_full(pmsg, str->data, (size_t)str->len);
                    if (status != NC_OK) {
                        pthread_rwlock_unlock(&rwlocker);
                        conn->err = ENOMEM;
                        return status;
                    }
                    status = msg_append_full(pmsg, (uint8_t *)CRLF, CRLF_LEN);
                    if (status != NC_OK) {
                        pthread_rwlock_unlock(&rwlocker);
                        conn->err = ENOMEM;
                        return status;
                    }
                }
            }
            se = STAILQ_NEXT(se, next);
        }
        pthread_rwlock_unlock(&rwlocker);

        if (msg_empty(pmsg)) {
            /* no entries matched: reply "END" (plus the CRLF from done:) */
            status = msg_append_full(pmsg, (uint8_t*)"END", 3);
            if (status != NC_OK) {
                conn->err = ENOMEM;
                return status;
            }
            goto done;
        }
        /* entries already carry their own CRLFs; skip the done: trailer */
        return NC_OK;
    } else if (subcmdlen==strlen("overview") && !memcmp(kp->start,"overview",subcmdlen)) {
        /* "slowlog overview [days]": one line per day (default 10 days),
         * newest day first, listing the per-period counters */
        int count, buf_len;
        uint8_t buf[50];
        int j, idx, id;
        struct statistics_oneday *so;

        if (nkeys == 1) {
            count = 10;    /* default number of days */
        } else if (nkeys == 2) {
            kp = array_get(msg->keys, 1);
            count = nc_atoi(kp->start, (kp->end-kp->start));
            if (count < 0) {
                goto format_error;
            }
        } else {
            goto format_error;
        }

        /* statistics collection disabled: nothing to report */
        if (slowlog_statistics == NULL) {
            status = msg_append_full(pmsg, (uint8_t*)"END", 3);
            if (status != NC_OK) {
                conn->err = ENOMEM;
                return status;
            }
            goto done;
        }

        /* clamp to the size of the ring of per-day records */
        if (count > statistics_days) count = statistics_days;

        /* walk the ring backwards from today; every error path below must
         * unlock statistics_locker before returning */
        pthread_mutex_lock(&statistics_locker);
        idx = today_idx;
        id = 1;
        while (count--) {
            so = &slowlog_statistics[idx];
            if (so->year == 0) break;    /* unused slot: end of history */

            /* line header: "N) YYYY-MM-DD " (tm-style year/month offsets) */
            buf_len = nc_scnprintf(buf,50,"%d) %d-%d-%d ",id++,so->year+1900,so->mon+1,so->day);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_mutex_unlock(&statistics_locker);
                conn->err = ENOMEM;
                return status;
            }
            /* space-separated period counters, last one ends the line */
            for (j = 0; j < statistics_period-1; j ++) {
                buf_len = nc_scnprintf(buf,50,"%lld ",so->periods[j]);
                status = msg_append_full(pmsg, buf, (size_t)buf_len);
                if (status != NC_OK) {
                    pthread_mutex_unlock(&statistics_locker);
                    conn->err = ENOMEM;
                    return status;
                }
            }
            buf_len = nc_scnprintf(buf,50,"%lld\r\n",so->periods[statistics_period-1]);
            status = msg_append_full(pmsg, buf, (size_t)buf_len);
            if (status != NC_OK) {
                pthread_mutex_unlock(&statistics_locker);
                conn->err = ENOMEM;
                return status;
            }
            /* step to the previous day, wrapping around the ring */
            if (--idx < 0) {
                idx = statistics_days - 1;
            }
        }
        pthread_mutex_unlock(&statistics_locker);

        if (msg_empty(pmsg)) {
            status = msg_append_full(pmsg, (uint8_t*)"END", 3);
            if (status != NC_OK) {
                conn->err = ENOMEM;
                return status;
            }
            goto done;
        }
        /* day lines already end with CRLF; skip the done: trailer */
        return NC_OK;
    } else {
        goto format_error;    /* unknown subcommand */
    }

format_error:
    contents = "ERR: slowlog command format is error.";
    status = msg_append_full(pmsg, (uint8_t *)contents, strlen(contents));
    if (status != NC_OK) {
        conn->err = ENOMEM;
        return status;
    }
    goto done;

done:
    /* terminate the single-line replies with CRLF */
    status = msg_append_full(pmsg, (uint8_t *)CRLF, CRLF_LEN);
    if (status != NC_OK) {
        conn->err = ENOMEM;
        return status;
    }
    return NC_OK;
}
/*
 * Dongle firmware main loop (8051-style MCU, SDCC __xdata keyword).
 * Forever: poll USB, read the radio for key-state or text messages,
 * type queued text one character per report, and push keyboard and
 * consumer HID reports to their IN endpoints when they are free.
 * Never returns; void main() is the freestanding-firmware entry point.
 */
void main()
{
    bool keyboard_report_ready = false;   /* keyboard report waiting to be sent */
    bool consumer_report_ready = false;   /* consumer report waiting to be sent */
    uint8_t prev_keycode = KC_NO;         /* last keycode typed from text, to force key-up between repeats */
    __xdata uint8_t recv_buffer[RECV_BUFF_SIZE];
    __xdata uint8_t bytes_received;

    P0DIR = 0x00;    // all outputs
    P0ALT = 0x00;    // all GPIO default behavior

    LED_off();
    usbInit();
    //dbgInit();
    rf_dngl_init();
    reset_keyboard_report();

    for (;;) {
        usbPoll();    // handles USB interrupts
        //dbgPoll();  // send chars from the uart TX buffer

        // try to read the recv buffer
        bytes_received = rf_dngl_recv(recv_buffer, RECV_BUFF_SIZE);

        if (bytes_received) {
            // we have new data, so what is it? first byte is the message type
            if (recv_buffer[0] == MT_KEY_STATE) {
                process_key_state_msg(recv_buffer, bytes_received);
                consumer_report_ready = true;
                keyboard_report_ready = true;
            } else if (recv_buffer[0] == MT_TEXT) {
                // text is queued and typed out below, one char per report
                process_text_msg(recv_buffer, bytes_received);
            }
        }

        // no real report pending: type the next queued text character
        if (!keyboard_report_ready && !msg_empty()) {
            // get the next char from the stored text message
            uint8_t c = msg_peek();
            uint8_t new_keycode = get_keycode_for_char(c);
            reset_keyboard_report();
            // if the keycode is different than the previous, send it;
            // otherwise send an empty report to simulate the key going up
            // (needed so two identical chars in a row register twice)
            if (new_keycode != prev_keycode || new_keycode == KC_NO) {
                usb_keyboard_report.keys[0] = new_keycode;
                usb_keyboard_report.modifiers = get_modifiers_for_char(c);
                msg_pop();    // remove char from the buffer
            } else {
                new_keycode = KC_NO;
            }
            keyboard_report_ready = true;
            prev_keycode = new_keycode;    // remember for later
        }

        // send the keyboard report if the endpoint is not busy
        // NOTE(review): bit 0x02 of in1cs presumably is the endpoint busy
        // flag -- confirm against the MCU's USB register documentation
        if ((in1cs & 0x02) == 0 && (keyboard_report_ready || usbHasIdleElapsed())) {
            // copy the keyboard report into the endpoint buffer
            in1buf[0] = usb_keyboard_report.modifiers;
            in1buf[1] = 0;    // unused byte of the 8-byte report
            in1buf[2] = usb_keyboard_report.keys[0];
            in1buf[3] = usb_keyboard_report.keys[1];
            in1buf[4] = usb_keyboard_report.keys[2];
            in1buf[5] = usb_keyboard_report.keys[3];
            in1buf[6] = usb_keyboard_report.keys[4];
            in1buf[7] = usb_keyboard_report.keys[5];
            // send the data on it's way (writing the byte count arms the endpoint)
            in1bc = 8;
            keyboard_report_ready = false;
        }

        // send the consumer report if the endpoint is not busy
        if ((in2cs & 0x02) == 0 && (consumer_report_ready || usbHasIdleElapsed())) {
            in2buf[0] = usb_consumer_report;
            in2bc = 1;
            consumer_report_ready = false;
        }
    }
}