mq_stream_t *mq_stream_read_create(mq_context_t *mqc, mq_ongoing_t *on, char *host_id, int hid_len, mq_frame_t *fdata, mq_msg_t *remote_host, int to)
{
    mq_stream_t *mqs;
    int ptype;

    type_malloc_clear(mqs, mq_stream_t, 1);

    mqs->mqc = mqc;
    mqs->ongoing = on;
    mqs->type = MQS_READ;
    mqs->want_more = MQS_MORE;
    mqs->host_id = host_id;
    mqs->hid_len = hid_len;
    mqs->timeout = to;
    mqs->msid = atomic_global_counter();

    if (log_level() > 5) {
        char *str = mq_address_to_string(remote_host);
        log_printf(5, "remote_host=%s\n", str);
        if (str) free(str);
    }

    mq_get_frame(fdata, (void **)&(mqs->data), &(mqs->len));

    mqs->sid_len = mqs->data[MQS_HANDLE_SIZE_INDEX];
    type_malloc(mqs->stream_id, char, mqs->sid_len);
    memcpy(mqs->stream_id, &(mqs->data[MQS_HANDLE_INDEX]), mqs->sid_len);

    ptype = (mqs->data[MQS_PACK_INDEX] == MQS_PACK_COMPRESS) ? PACK_COMPRESS : PACK_NONE;
    log_printf(1, "msid=%d ptype=%d pack_type=%c\n", mqs->msid, ptype, mqs->data[MQS_PACK_INDEX]);
    mqs->pack = pack_create(ptype, PACK_READ, &(mqs->data[MQS_HEADER]), mqs->len - MQS_HEADER);

    log_printf(5, "data_len=%d more=%c MQS_HEADER=%d\n", mqs->len, mqs->data[MQS_STATE_INDEX], MQS_HEADER);

    unsigned char buffer[1024];
    int n = (50 > mqs->len) ? mqs->len : 50;
    log_printf(5, "printing 1st 50 bytes mqsbuf=%s\n", mq_id2str((char *)mqs->data, n, (char *)buffer, 1024));

    if (mqs->data[MQS_STATE_INDEX] == MQS_MORE) { //** More data coming so ask for it
        log_printf(5, "issuing read request\n");
        mqs->remote_host = mq_msg_new();
        mq_msg_append_msg(mqs->remote_host, remote_host, MQF_MSG_AUTO_FREE);
        if (log_level() >= 15) {
            char *rhost = mq_address_to_string(mqs->remote_host);
            log_printf(15, "remote_host as string = %s\n", rhost);
            if (rhost) free(rhost);
        }
        log_printf(5, "before ongoing_inc\n");
        mq_ongoing_host_inc(mqs->ongoing, mqs->remote_host, mqs->host_id, mqs->hid_len, mqs->timeout);
        log_printf(5, "after ongoing_inc\n");
        mq_stream_read_request(mqs);
    }

    log_printf(5, "END\n");
    return(mqs);
}
int CommandProc::proc(const Link &link, const Request &req, Response *resp){
    if(req.size() <= 0){
        return -1;
    }
    if(log_level() >= Logger::LEVEL_DEBUG){
        std::string log_buf = serialize_req(req);
        log_debug("req: %s", log_buf.c_str());
    }
    int ret = 0;
    proc_map_t::iterator it = proc_map.find(req[0]);
    if(it == proc_map.end()){
        resp->push_back("client_error");
        resp->push_back("Unknown Command: " + req[0].String());
    }else{
        proc_t p = it->second;
        ret = (this->*p)(link, req, resp);
    }
    if(log_level() >= Logger::LEVEL_DEBUG){
        std::string log_buf = serialize_req(*resp);
        log_debug("resp: %s", log_buf.c_str());
    }
    return ret;
}
static void set_ex_opts()
{
    if (!ex_opts_set) {
        if (log_level() == LOG_DEBUG)
            ex_opts(EX_DEBUG | EX_VERBOSE);
        else if (log_level() == LOG_DETAIL)
            ex_opts(EX_VERBOSE);
        ex_opts_set = true;
    }
}
void mq_ongoing_host_inc(mq_ongoing_t *on, mq_msg_t *remote_host, char *my_id, int id_len, int heartbeat)
{
    ongoing_hb_t *oh;
    ongoing_table_t *table;
    mq_msg_hash_t hash;
    char *remote_host_string;

    apr_thread_mutex_lock(on->lock);

    char *str = mq_address_to_string(remote_host);
    log_printf(5, "remote_host=%s\n", str);
    free(str);

    hash = mq_msg_hash(remote_host);
    table = apr_hash_get(on->table, &hash, sizeof(mq_msg_hash_t));  //** Look up the remote host

    if (log_level() > 5) {
        remote_host_string = mq_address_to_string(remote_host);
        log_printf(5, "remote_host=%s hb=%d table=%p\n", remote_host_string, heartbeat, table);
        free(remote_host_string);
    }

    if (table == NULL) { //** New host so add it
        type_malloc_clear(table, ongoing_table_t, 1);
        table->table = apr_hash_make(on->mpool);
        assert(table->table != NULL);
        table->remote_host = mq_msg_new();
        mq_msg_append_msg(table->remote_host, remote_host, MQF_MSG_AUTO_FREE);
        table->remote_host_hash = hash;
        apr_hash_set(on->table, &(table->remote_host_hash), sizeof(mq_msg_hash_t), table);
    }

    table->count++;

    oh = apr_hash_get(table->table, my_id, id_len);  //** Look up the id
    if (oh == NULL) { //** New ID so add it
        type_malloc_clear(oh, ongoing_hb_t, 1);
        type_malloc(oh->id, char, id_len);
        memcpy(oh->id, my_id, id_len);
        oh->id_len = id_len;

        oh->heartbeat = heartbeat / on->send_divisor;
        if (oh->heartbeat < 1) oh->heartbeat = 1;

        if (log_level() > 5) {
            remote_host_string = mq_address_to_string(remote_host);
            log_printf(5, "remote_host=%s final hb=%d\n", remote_host_string, oh->heartbeat);
            free(remote_host_string);
        }

        oh->next_check = apr_time_now() + apr_time_from_sec(oh->heartbeat);
        apr_hash_set(table->table, oh->id, id_len, oh);
    }

    oh->count++;  //** Reference count; the heartbeat thread frees the entry when it drops to 0
    apr_thread_mutex_unlock(on->lock);
}
int main(int argc, char **argv)
{
#ifdef HAVE_FT2
    const char *text = "Test text!";
    const char *font = "arial.ttf";
    const char *file = "ttftest.gif";
    const char *bg = "heading-yellow.gif";
    const char *color = "#000000";

    log_init(STDOUT_FILENO, LOG_ALL, L_warning);
    io_init_except(STDOUT_FILENO, STDOUT_FILENO, STDOUT_FILENO);
    mem_init();
    dlink_init();
    gif_init();
    image_init();
    ttf_init();

    ttftest_log = log_source_register("ttftest");
    log_level(LOG_ALL, L_verbose);

    if(argc > 1) text = argv[1];
    if(argc > 2) font = argv[2];
    if(argc > 3) file = argv[3];
    if(argc > 4) bg = argv[4];
    if(argc > 5) color = argv[5];

    ttftest_write(text, font, file, bg, color);

    log_level(LOG_ALL, L_warning);
    log_source_unregister(ttftest_log);

    ttf_shutdown();
    image_shutdown();
    gif_shutdown();
    dlink_shutdown();
    mem_shutdown();
    log_shutdown();
    io_shutdown();
#endif
    return 0;
}
/*
 * \brief function to print a log section, with a boxed header, to a stream
 * \param level the log level of the message to be logged
 * \param stream stream where the section should be printed
 * \param header string to be printed as the section header
 * \param format format string to be logged
 * \param ... additional arguments for format
 */
void log_section(log_level_t level, FILE *stream, const char *header, const char *format, ...)
{
    va_list args;
    va_start(args, format);
    if (log_level(0) >= level) {
        if (level > LOG_INFO) {
            char lvl_marker[24];
            get_log_level_str(level, lvl_marker);
            fprintf(stream, "%s: ", lvl_marker);
        }
        fprintf(stream, "\n**** %s ****\n", header);
        vfprintf(stream, format, args);
        fprintf(stream, "\n");
    }
    va_end(args);
}
void NetworkServer::proc(ProcJob *job){
    job->serv = this;
    job->result = PROC_OK;
    job->stime = millitime();

    const Request *req = job->link->last_recv();
    Response resp;

    do{
        // AUTH
        if(this->need_auth && job->link->auth == false && req->at(0) != "auth"){
            resp.push_back("noauth");
            resp.push_back("authentication required");
            break;
        }

        Command *cmd = proc_map.get_proc(req->at(0));
        if(!cmd){
            resp.push_back("client_error");
            resp.push_back("Unknown Command: " + req->at(0).String());
            break;
        }
        job->cmd = cmd;
        if(cmd->name != "client"){
            job->link->last_cmd = cmd->name;
        }

        if(cmd->flags & Command::FLAG_THREAD){
            if(cmd->flags & Command::FLAG_WRITE){
                if(this->readonly){
                    resp.reply_status(-1, "server is readonly");
                    break;
                }
                job->result = PROC_THREAD;
                writer->push(*job);
            }else{
                job->result = PROC_THREAD;
                reader->push(*job);
            }
            return;
        }

        proc_t p = cmd->proc;
        job->time_wait = 1000 * (millitime() - job->stime);
        job->result = (*p)(this, job->link, *req, &resp);
        job->time_proc = 1000 * (millitime() - job->stime) - job->time_wait;
    }while(0);

    if(job->link->send(resp.resp) == -1){
        job->result = PROC_ERROR;
    }else{
        if(log_level() >= Logger::LEVEL_DEBUG){
            log_debug("w:%.3f,p:%.3f, req: %s, resp: %s",
                job->time_wait, job->time_proc,
                serialize_req(*req).c_str(),
                serialize_req(resp.resp).c_str());
        }
    }
}
/**
 * Open log
 */
int log_open(const char *file, const char *ident, int level)
{
    log_close();
    lfd = strcasecmp(file, "stderr") ? open(file, O_WRONLY | O_CREAT | O_APPEND, 0600)
                                     : STDERR_FILENO;
    log_ident(ident);
    log_level(level);
    return lfd;
}
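/* Hedged usage sketch for log_open() above (not from the original source):
 * the name "stderr" is special-cased to STDERR_FILENO, anything else is
 * opened for append.  The log path and the numeric levels are assumptions. */
static int setup_logging(int debug)
{
    const char *target = debug ? "stderr" : "/var/log/mydaemon.log";  /* hypothetical path */
    int fd = log_open(target, "mydaemon", debug ? 7 : 4);             /* example levels */
    return (fd < 0) ? -1 : 0;
}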
/*
 * \brief function to set the log level from a level name string (e.g. from getopt)
 * \param level the level name to set: QUIET, INFO, WARN, ERROR, DEBUG_THREAD or DEBUG
 * \return the current log level
 */
log_level_t set_log_level_str(char *level)
{
    log_level_t ret;

    if (!strcmp(level, "QUIET")) {
        ret = log_level(LOG_QUIET);
    } else if (!strcmp(level, "INFO")) {
        ret = log_level(LOG_INFO);
    } else if (!strcmp(level, "WARN")) {
        ret = log_level(LOG_WARN);
    } else if (!strcmp(level, "ERROR")) {
        ret = log_level(LOG_ERROR);
    } else if (!strcmp(level, "DEBUG_THREAD")) {
        ret = log_level(LOG_DEBUG_THREAD);
    } else if (!strcmp(level, "DEBUG")) {
        ret = log_level(LOG_DEBUG);
    } else {
        log_stderr(LOG_ERROR, "Unrecognised log level");
        ret = log_level(0);
    }

    /* print current log level */
    char lev[16];
    get_log_level_str(ret, lev);
    log_stdout(LOG_ERROR, "Log level currently set at %s", lev);

    return ret;
}
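/* Hedged sketch: wiring set_log_level_str() above to a command-line option
 * with POSIX getopt().  The -l option letter is an assumption. */
#include <unistd.h>

static void parse_log_args(int argc, char **argv)
{
    int c;
    while ((c = getopt(argc, argv, "l:")) != -1) {
        if (c == 'l')
            set_log_level_str(optarg);  /* e.g. "-l DEBUG" or "-l QUIET" */
    }
}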
gas_t *gas_new(config_t *cfg, struct boot *boot)
{
#ifndef HAVE_NETWORK
    // if we didn't build a network we need to default to SMP
    cfg->gas = HPX_GAS_SMP;
#endif

    int ranks = boot_n_ranks(boot);
    gas_t *gas = NULL;
    libhpx_gas_t type = cfg->gas;

    // if we built a network, we might want to optimize for SMP
    if (ranks == 1 && cfg->opt_smp) {
        if (type != HPX_GAS_SMP && type != HPX_GAS_DEFAULT) {
            log_level(LEVEL, "GAS %s overridden to SMP.\n", HPX_GAS_TO_STRING[type]);
            cfg->gas = HPX_GAS_SMP;
        }
        type = HPX_GAS_SMP;
    }

    if (ranks > 1 && type == HPX_GAS_SMP) {
        dbg_error("SMP GAS selection fails for %d ranks\n", ranks);
    }

    switch (type) {
    case HPX_GAS_SMP:
        gas = gas_smp_new();
        break;
    case HPX_GAS_AGAS:
#if defined(HAVE_AGAS) && defined(HAVE_NETWORK)
        gas = gas_agas_new(cfg, boot);
#endif
        break;
    case HPX_GAS_PGAS:
#ifdef HAVE_NETWORK
        gas = gas_pgas_new(cfg, boot);
#endif
        break;
    default:
        dbg_error("unexpected configuration value for --hpx-gas\n");
    }

    if (!gas) {
        log_error("GAS %s failed to initialize\n", HPX_GAS_TO_STRING[type]);
    } else {
        log_gas("GAS %s initialized\n", HPX_GAS_TO_STRING[type]);
    }
    return gas;
}
int Server::read_session(Session *sess){
    Link *link = sess->link;
    if(link->error()){
        return 0;
    }
    int len = link->read();
    if(len <= 0){
        this->close_session(sess);
        return -1;
    }
    while(1){
        Request req;
        int ret = link->recv(&req.msg);
        if(ret == -1){
            log_info("fd: %d, parse error, delete link", link->fd());
            this->close_session(sess);
            return -1;
        }else if(ret == 0){
            // message not complete yet, keep reading from the network
            break;
        }
        req.stime = millitime();
        req.sess = *sess;
        Response resp;
        for(int i = 0; i < (int)this->handlers.size(); i++){
            Handler *handler = this->handlers[i];
            req.time_wait = 1000 * (millitime() - req.stime);
            HandlerState state = handler->proc(req, &resp);
            req.time_proc = 1000 * (millitime() - req.stime) - req.time_wait;
            if(state == HANDLE_RESP){
                link->send(resp.msg);
                if(link && !link->output.empty()){
                    fdes->set(link->fd(), FDEVENT_OUT, DEFAULT_TYPE, sess);
                }
                if(log_level() >= Logger::LEVEL_DEBUG){
                    log_debug("w:%.3f,p:%.3f, req: %s resp: %s",
                        req.time_wait, req.time_proc,
                        msg_str(req.msg).c_str(),
                        msg_str(resp.msg).c_str());
                }
            }else if(state == HANDLE_FAIL){
                this->close_session(sess);
                return -1;
            }
        }
    }
    return 0;
}
int main(const int argc, const char *argv[])
{
    event_set_log_callback(write_to_file_cb);
    event_enable_debug_mode();

    //log_level(LOG_DEBUG);
    log_fileline(LOG_FILELINE_ON);
    log_level(LOG_WARN);

    server_t *server = server_init(DEFAULT_PORT);
    if(server == NULL) {
        log_err(__FILE__, __LINE__, "Cannot init server.");
        exit(-1);
    }

    int power = 10;
    server->service_idx = service_index_init(power);

    char *str = NULL;

    str = calloc(5, sizeof(char));
    memcpy(str, "aaaa", 4);
    service_t *aaaa = service_init(str);
    aaaa = service_add(server->service_idx, power, aaaa);
    server->service_first = aaaa;
    server->service_last = aaaa;
    server->num_services++;

    str = calloc(5, sizeof(char));
    memcpy(str, "bbbb", 4);
    service_t *bbbb = service_init(str);
    bbbb = service_add(server->service_idx, power, bbbb);
    aaaa->all_next = bbbb;
    server->service_last = bbbb;
    server->num_services++;

    str = calloc(5, sizeof(char));
    memcpy(str, "cccc", 4);
    service_t *cccc = service_init(str);
    cccc = service_add(server->service_idx, power, cccc);
    bbbb->all_next = cccc;
    server->service_last = cccc;
    server->num_services++;

    server_event_run(server);
    return 0;
}
/** Sets loglevel.
 * The loglevel is set using a textual levelname
 * @param l the logger
 * @param levelname the new loglevel to use
 * @return true if levelname had been valid, false otherwise
 */
bool_t
log_level_set (log_t *l, const char *levelname) /*{{{*/
{
    int nlevel;
    bool_t rc;

    if ((nlevel = log_level (levelname)) != -1) {
        l -> level = nlevel;
        rc = true;
    } else
        rc = false;
    return rc;
}/*}}}*/
void mq_stream_read_destroy(mq_stream_t *mqs)
{
    log_printf(1, "START msid=%d\n", mqs->msid);

    if (mqs->mpool == NULL) { //** Nothing to do
        pack_destroy(mqs->pack);
        if (mqs->stream_id != NULL) free(mqs->stream_id);
        free(mqs);
        return;
    }

    //** Change the flag which signals we don't want anything else
    apr_thread_mutex_lock(mqs->lock);
    mqs->want_more = MQS_ABORT;

    //** Consume all the current data and request the pending
    while ((mqs->gop_processed != NULL) || (mqs->gop_waiting != NULL)) {
        log_printf(1, "Clearing pending processed=%p waiting=%p msid=%d\n", mqs->gop_processed, mqs->gop_waiting, mqs->msid);
        if (mqs->gop_processed != NULL) log_printf(1, "processed gid=%d\n", gop_id(mqs->gop_processed));
        if (mqs->gop_waiting != NULL) log_printf(1, "waiting gid=%d\n", gop_id(mqs->gop_waiting));
        mqs->want_more = MQS_ABORT;
        apr_thread_mutex_unlock(mqs->lock);
        mq_stream_read_wait(mqs);
        apr_thread_mutex_lock(mqs->lock);
    }

    if (log_level() >= 15) {
        char *rhost = mq_address_to_string(mqs->remote_host);
        log_printf(15, "remote_host as string = %s\n", rhost);
        if (rhost) free(rhost);
    }

    if (mqs->remote_host != NULL) mq_ongoing_host_dec(mqs->ongoing, mqs->remote_host, mqs->host_id, mqs->hid_len);
    apr_thread_mutex_unlock(mqs->lock);

    log_printf(2, "msid=%d transfer_packets=%d\n", mqs->msid, mqs->transfer_packets);

    //** Clean up
    if (mqs->stream_id != NULL) free(mqs->stream_id);
    pack_destroy(mqs->pack);
    apr_thread_mutex_destroy(mqs->lock);
    apr_thread_cond_destroy(mqs->cond);
    apr_pool_destroy(mqs->mpool);
    if (mqs->remote_host != NULL) mq_msg_destroy(mqs->remote_host);

    free(mqs);
    return;
}
void Server::proc(ProcJob *job){
    job->serv = this;
    job->result = PROC_OK;
    job->stime = millitime();

    const Request *req = job->link->last_recv();
    Response resp;

    proc_map_t::iterator it = proc_map.find(req->at(0));
    if(it == proc_map.end()){
        resp.push_back("client_error");
        resp.push_back("Unknown Command: " + req->at(0).String());
    }else{
        Command *cmd = it->second;
        job->cmd = cmd;
        if(cmd->flags & Command::FLAG_THREAD){
            if(cmd->flags & Command::FLAG_WRITE){
                job->result = PROC_THREAD;
                writer->push(*job);
                return; /////
            }else if(cmd->flags & Command::FLAG_READ){
                job->result = PROC_THREAD;
                reader->push(*job);
                return; /////
            }else{
                log_error("bad command config: %s", cmd->name);
            }
        }
        proc_t p = cmd->proc;
        job->time_wait = 1000 * (millitime() - job->stime);
        job->result = (*p)(this, job->link, *req, &resp);
        job->time_proc = 1000 * (millitime() - job->stime);
    }
    if(job->result == PROC_BACKEND){
        return;
    }
    if(job->link->send(resp) == -1){
        job->result = PROC_ERROR;
    }else{
        if(log_level() >= Logger::LEVEL_DEBUG){
            log_debug("w:%.3f,p:%.3f, req: %s, resp: %s",
                job->time_wait, job->time_proc,
                serialize_req(*req).c_str(),
                serialize_req(resp).c_str());
        }
    }
}
void _opque_print_stack(Stack_t *stack)
{
    op_generic_t *gop;
    int i = 0;

    if (log_level() <= 15) return;

    move_to_top(stack);
    while ((gop = (op_generic_t *)get_ele_data(stack)) != NULL) {
        log_printf(15, "    i=%d gid=%d type=%d\n", i, gop_id(gop), gop_get_type(gop));
        i++;
        move_down(stack);
    }

    if (stack_size(stack) != i) log_printf(0, "Stack size mismatch! stack_size=%d i=%d\n", stack_size(stack), i);
}
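/* The early-return guard in _opque_print_stack() above is the recurring
 * pattern in these snippets: check the level first so the expensive walk
 * or serialization is skipped entirely at normal verbosity.  A minimal
 * sketch of the same idea, assuming the log_level()/log_printf() helpers
 * shown above (dump_counters() and its arguments are hypothetical): */
static void dump_counters(const char *name, int count)
{
    if (log_level() <= 15) return;  //** Same guard: do nothing unless very verbose

    log_printf(15, "counters for %s: %d entries\n", name, count);
}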
const message::string_type& message::decorated_message() const
{
    if (_decorated_message.empty())
    {
        ostream_type msg_decoration;
        if (!sending_logger().name().empty())
        {
            msg_decoration << sending_logger().name() << " ";
        }
        msg_decoration << "<" << log_level().to_string() << "> ";
        decorate_message(msg_decoration.str(), raw_message(), _decorated_message);
        msg_decoration.str().swap(_postdec_decoration);
    }
    return _decorated_message;
}
int main()
{
    log_level(LOG_DEBUG);
    int ret = 0;
    if (test_enc_dec_remaining_pkt_len()) {
        ret += 1;
    }
    if (test_packet_creation()) {
        ret += 1;
    }
    if (test_utf8_encode_decode()) {
        ret += 1;
    }
    return ret;
}
/**
 * Create a new context.
 */
XKB_EXPORT struct xkb_context *
xkb_context_new(enum xkb_context_flags flags)
{
    const char *env;
    struct xkb_context *ctx = calloc(1, sizeof(*ctx));

    if (!ctx)
        return NULL;

    ctx->refcnt = 1;
    ctx->log_fn = default_log_fn;
    ctx->log_level = XKB_LOG_LEVEL_ERROR;
    ctx->log_verbosity = 0;

    /* Environment overwrites defaults. */
    env = getenv("XKB_LOG_LEVEL");
    if (env)
        xkb_context_set_log_level(ctx, log_level(env));

    env = getenv("XKB_LOG_VERBOSITY");
    if (env)
        xkb_context_set_log_verbosity(ctx, log_verbosity(env));

    if (!(flags & XKB_CONTEXT_NO_DEFAULT_INCLUDES) &&
        !xkb_context_include_path_append_default(ctx)) {
        log_err(ctx, "failed to add default include path %s\n",
                DFLT_XKB_CONFIG_ROOT);
        xkb_context_unref(ctx);
        return NULL;
    }

    ctx->use_environment_names = !(flags & XKB_CONTEXT_NO_ENVIRONMENT_NAMES);

    ctx->atom_table = atom_table_new();
    if (!ctx->atom_table) {
        xkb_context_unref(ctx);
        return NULL;
    }

    return ctx;
}
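/* Hedged usage sketch for the environment hooks read by xkb_context_new()
 * above: exporting XKB_LOG_LEVEL / XKB_LOG_VERBOSITY before the context is
 * created raises libxkbcommon's logging without touching any API calls. */
#include <stdlib.h>
#include <xkbcommon/xkbcommon.h>

int main(void)
{
    setenv("XKB_LOG_LEVEL", "debug", 1);   /* parsed by log_level(env) above */
    setenv("XKB_LOG_VERBOSITY", "10", 1);  /* parsed by log_verbosity(env) */

    struct xkb_context *ctx = xkb_context_new(XKB_CONTEXT_NO_FLAGS);
    if (!ctx)
        return 1;
    xkb_context_unref(ctx);
    return 0;
}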
/*
 * \brief function to print log messages to stderr
 * \param level the log level of the message to be logged
 * \param format String to be logged
 * \param ... additional arguments for format
 */
void log_stderr(log_level_t level, const char *format, ...)
{
    va_list args;
    va_start(args, format);
    if (log_level(0) >= level) {
        /* Automatically print msg type for stderr */
        char lvl_marker[24];
        get_log_level_str(level, lvl_marker);
        fprintf(stderr, "%s: ", lvl_marker);

        /* print error */
        vfprintf(stderr, format, args);
        fprintf(stderr, "\n");
    }
    va_end(args);
}
/*
 * \brief function to print log messages to stdout
 * \param level the log level of the message to be logged
 * \param format String to be logged
 * \param ... additional arguments for format
 */
void log_stdout(log_level_t level, const char *format, ...)
{
    va_list args;
    va_start(args, format);
    if (log_level(0) >= level) {
        if (level > LOG_INFO) {
            char lvl_marker[24];
            get_log_level_str(level, lvl_marker);
            fprintf(stdout, "%s: ", lvl_marker);
        }
        vfprintf(stdout, format, args);
        fprintf(stdout, "\n");
    }
    va_end(args);
}
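/* Hedged sketch combining the log_stdout()/log_stderr()/log_section()
 * helpers above; the specific messages and the use of LOG_INFO/LOG_ERROR
 * are illustrative assumptions. */
static void report_results(int failures)
{
    log_section(LOG_INFO, stdout, "RESULTS", "failures: %d", failures);
    if (failures > 0)
        log_stderr(LOG_ERROR, "%d test(s) failed", failures);
    else
        log_stdout(LOG_INFO, "all tests passed");
}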
int main (int argc, char *argv[])
{
    const struct ud_renderer *r = 0;
    unsigned long num;
    const char *r_name;
    const char *file;

    log_level (6);

    test_assert (argc == 4);
    file = argv[1];
    r_name = argv[2];
    main_opts.ud_split_thresh = atoi (argv[3]);

    for (num = 0; num < sizeof (renderers) / sizeof (renderers[0]); ++num)
        if (str_same (renderers[num]->ur_data.ur_name, r_name)) {
            r = renderers[num];
            break;
        }

    test_assert (r != 0);
    test_assert (ud_init (&doc) == 1);
    doc.ud_opts = main_opts;
    test_assert (ud_open (&doc, file) == 1);
    test_assert (ud_parse (&doc) == 1);
    test_assert (ud_validate (&doc) == 1);
    test_assert (ud_partition (&doc, 0) == 1);
    test_assert (ud_render_doc (&doc, &r_opts, r, ".") == 1);
    test_assert (ud_free (&doc) == 1);
    return 0;
}
void set_logging(char *optarg)
{
    if(optarg == NULL)
        log_level(_LOG_DEFAULT);
    else if(strncmp(_LOG_SSDEBUG, optarg, strlen(_LOG_SSDEBUG)) == 0)
        log_level(_LOG_DEBUG);
    else if(strncmp(_LOG_SSINFO, optarg, strlen(_LOG_SSINFO)) == 0)
        log_level(_LOG_INFO);
    else if(strncmp(_LOG_SSWARN, optarg, strlen(_LOG_SSWARN)) == 0)
        log_level(_LOG_WARN);
    else if(strncmp(_LOG_SSERR, optarg, strlen(_LOG_SSERR)) == 0)
        log_level(_LOG_ERR);
    else
        log_level(_LOG_DEFAULT);

    // zu_set_log_level(log_level(0));
}
int NetworkServer::proc(ProcJob *job){
    job->serv = this;
    job->result = PROC_OK;
    job->stime = millitime();

    const Request *req = job->req;

    do{
        // AUTH
        if(this->need_auth && job->link->auth == false && req->at(0) != "auth"){
            job->resp.push_back("noauth");
            job->resp.push_back("authentication required.");
            break;
        }

        job->cmd = proc_map.get_proc(req->at(0));
        if(!job->cmd){
            job->resp.push_back("client_error");
            job->resp.push_back("Unknown Command: " + req->at(0).String());
            break;
        }
        if(this->readonly && (job->cmd->flags & Command::FLAG_WRITE)){
            job->resp.push_back("client_error");
            job->resp.push_back("Forbidden Command: " + req->at(0).String());
            break;
        }

        if(job->cmd->flags & Command::FLAG_THREAD){
            if(job->cmd->flags & Command::FLAG_WRITE){
                writer->push(job);
            }else{
                reader->push(job);
            }
            return PROC_THREAD;
        }

        proc_t p = job->cmd->proc;
        job->time_wait = 1000 * (millitime() - job->stime);
        job->result = (*p)(this, job->link, *req, &job->resp);
        job->time_proc = 1000 * (millitime() - job->stime) - job->time_wait;
    }while(0);

    if(job->link->send(job->resp.resp) == -1){
        job->result = PROC_ERROR;
    }else{
        // try to write socket before it would be added to fdevents
        // socket is NONBLOCK, so it won't block.
        if(job->link->write() < 0){
            job->result = PROC_ERROR;
        }
    }

    if(log_level() >= Logger::LEVEL_DEBUG){
        // serialize_req is expensive
        log_debug("w:%.3f,p:%.3f, req: %s, resp: %s",
            job->time_wait, job->time_proc,
            serialize_req(*job->req).c_str(),
            serialize_req(job->resp.resp).c_str());
    }
    return job->result;
}
inline int log_debug_enable() { return log_level_compare(log_level(0), _LOG_DEBUG); }
static void cmd_acquire(struct task *task, struct cmd_args *ca)
{
    struct client *cl;
    struct token *token = NULL;
    struct token *new_tokens[SANLK_MAX_RESOURCES];
    struct sanlk_resource res;
    struct sanlk_options opt;
    struct space space;
    char *opt_str;
    int token_len, disks_len;
    int fd, rv, i, j, empty_slots, lvl;
    int alloc_count = 0, acquire_count = 0;
    int pos = 0, pid_dead = 0;
    int new_tokens_count;
    int recv_done = 0;
    int result = 0;
    int cl_ci = ca->ci_target;
    int cl_fd = ca->cl_fd;
    int cl_pid = ca->cl_pid;

    cl = &client[cl_ci];
    fd = client[ca->ci_in].fd;

    new_tokens_count = ca->header.data;

    log_debug("cmd_acquire %d,%d,%d ci_in %d fd %d count %d",
              cl_ci, cl_fd, cl_pid, ca->ci_in, fd, new_tokens_count);

    if (new_tokens_count > SANLK_MAX_RESOURCES) {
        log_error("cmd_acquire %d,%d,%d new %d max %d",
                  cl_ci, cl_fd, cl_pid, new_tokens_count, SANLK_MAX_RESOURCES);
        result = -E2BIG;
        goto done;
    }

    pthread_mutex_lock(&cl->mutex);
    if (cl->pid_dead) {
        result = -ESTALE;
        pthread_mutex_unlock(&cl->mutex);
        goto done;
    }
    empty_slots = 0;
    for (i = 0; i < SANLK_MAX_RESOURCES; i++) {
        if (!cl->tokens[i])
            empty_slots++;
    }
    pthread_mutex_unlock(&cl->mutex);

    if (empty_slots < new_tokens_count) {
        log_error("cmd_acquire %d,%d,%d new %d slots %d",
                  cl_ci, cl_fd, cl_pid, new_tokens_count, empty_slots);
        result = -ENOENT;
        goto done;
    }

    /*
     * read resource input and allocate tokens for each
     */

    for (i = 0; i < new_tokens_count; i++) {
        /*
         * receive sanlk_resource, create token for it
         */

        rv = recv(fd, &res, sizeof(struct sanlk_resource), MSG_WAITALL);
        if (rv > 0)
            pos += rv;
        if (rv != sizeof(struct sanlk_resource)) {
            log_error("cmd_acquire %d,%d,%d recv res %d %d",
                      cl_ci, cl_fd, cl_pid, rv, errno);
            result = -ENOTCONN;
            goto done;
        }

        if (!res.num_disks || res.num_disks > SANLK_MAX_DISKS) {
            result = -ERANGE;
            goto done;
        }

        disks_len = res.num_disks * sizeof(struct sync_disk);
        token_len = sizeof(struct token) + disks_len;

        token = malloc(token_len);
        if (!token) {
            result = -ENOMEM;
            goto done;
        }
        memset(token, 0, token_len);
        token->disks = (struct sync_disk *)&token->r.disks[0]; /* shorthand */
        token->r.num_disks = res.num_disks;
        memcpy(token->r.lockspace_name, res.lockspace_name, SANLK_NAME_LEN);
        memcpy(token->r.name, res.name, SANLK_NAME_LEN);
        if (res.flags & SANLK_RES_SHARED)
            token->r.flags |= SANLK_RES_SHARED;

        token->acquire_lver = res.lver;
        token->acquire_data64 = res.data64;
        token->acquire_data32 = res.data32;
        token->acquire_flags = res.flags;

        /*
         * receive sanlk_disk's / sync_disk's
         *
         * WARNING: as a shortcut, this requires that sync_disk and
         * sanlk_disk match; this is the reason for the pad fields
         * in sanlk_disk (TODO: let these differ?)
         */

        rv = recv(fd, token->disks, disks_len, MSG_WAITALL);
        if (rv > 0)
            pos += rv;
        if (rv != disks_len) {
            log_error("cmd_acquire %d,%d,%d recv disks %d %d",
                      cl_ci, cl_fd, cl_pid, rv, errno);
            free(token);
            result = -ENOTCONN;
            goto done;
        }

        /* zero out pad1 and pad2, see WARNING above */
        for (j = 0; j < token->r.num_disks; j++) {
            token->disks[j].sector_size = 0;
            token->disks[j].fd = -1;
        }

        token->token_id = token_id_counter++;
        new_tokens[i] = token;
        alloc_count++;
    }

    rv = recv(fd, &opt, sizeof(struct sanlk_options), MSG_WAITALL);
    if (rv > 0)
        pos += rv;
    if (rv != sizeof(struct sanlk_options)) {
        log_error("cmd_acquire %d,%d,%d recv opt %d %d",
                  cl_ci, cl_fd, cl_pid, rv, errno);
        result = -ENOTCONN;
        goto done;
    }

    strncpy(cl->owner_name, opt.owner_name, SANLK_NAME_LEN);

    if (opt.len) {
        opt_str = malloc(opt.len);
        if (!opt_str) {
            result = -ENOMEM;
            goto done;
        }

        rv = recv(fd, opt_str, opt.len, MSG_WAITALL);
        if (rv > 0)
            pos += rv;
        if (rv != opt.len) {
            log_error("cmd_acquire %d,%d,%d recv str %d %d",
                      cl_ci, cl_fd, cl_pid, rv, errno);
            free(opt_str);
            result = -ENOTCONN;
            goto done;
        }
    }

    /* TODO: warn if header.length != sizeof(header) + pos ? */

    recv_done = 1;

    /*
     * all command input has been received, start doing the acquire
     */

    for (i = 0; i < new_tokens_count; i++) {
        token = new_tokens[i];
        rv = lockspace_info(token->r.lockspace_name, &space);
        if (rv < 0 || space.killing_pids) {
            log_errot(token, "cmd_acquire %d,%d,%d invalid lockspace "
                      "found %d failed %d name %.48s",
                      cl_ci, cl_fd, cl_pid, rv, space.killing_pids,
                      token->r.lockspace_name);
            result = -ENOSPC;
            goto done;
        }
        token->host_id = space.host_id;
        token->host_generation = space.host_generation;
        token->pid = cl_pid;
        if (cl->restrict & SANLK_RESTRICT_SIGKILL)
            token->flags |= T_RESTRICT_SIGKILL;

        /* save a record of what this token_id is for later debugging */
        log_level(space.space_id, token->token_id, NULL, LOG_WARNING,
                  "resource %.48s:%.48s:%.256s:%llu%s for %d,%d,%d",
                  token->r.lockspace_name,
                  token->r.name,
                  token->r.disks[0].path,
                  (unsigned long long)token->r.disks[0].offset,
                  (token->acquire_flags & SANLK_RES_SHARED) ? ":SH" : "",
                  cl_ci, cl_fd, cl_pid);
    }

    for (i = 0; i < new_tokens_count; i++) {
        token = new_tokens[i];
        rv = acquire_token(task, token);
        if (rv < 0) {
            switch (rv) {
            case -EEXIST:
            case -EAGAIN:
            case -EBUSY:
                lvl = LOG_DEBUG;
                break;
            case SANLK_ACQUIRE_IDLIVE:
                lvl = com.quiet_fail ? LOG_DEBUG : LOG_ERR;
                break;
            default:
                lvl = LOG_ERR;
            }
            log_level(0, token->token_id, NULL, lvl,
                      "cmd_acquire %d,%d,%d acquire_token %d",
                      cl_ci, cl_fd, cl_pid, rv);
            result = rv;
            goto done;
        }
        acquire_count++;
    }

    /*
     * Success acquiring the leases:
     * lock mutex,
     * 1. if pid is live, move new_tokens to cl->tokens, clear cmd_active, unlock mutex
     * 2. if pid is dead, clear cmd_active, unlock mutex, release new_tokens, release cl->tokens, client_free
     *
     * Failure acquiring the leases:
     * lock mutex,
     * 3. if pid is live, clear cmd_active, unlock mutex, release new_tokens
     * 4. if pid is dead, clear cmd_active, unlock mutex, release new_tokens, release cl->tokens, client_free
     *
     * client_pid_dead() won't touch cl->tokens while cmd_active is set.
     * As soon as we clear cmd_active and unlock the mutex, client_pid_dead
     * will attempt to clear cl->tokens itself.  If we find client_pid_dead
     * has already happened when we look at pid_dead, then we know that it
     * won't be called again, and it's our responsibility to clear cl->tokens
     * and call client_free.
     */

    /*
     * We hold both space_mutex and cl->mutex at once to create the crucial
     * linkage between the client pid and the lockspace.  Once we release
     * these two mutexes, if the lockspace fails, this pid will be killed.
     * Prior to inserting the new_tokens into the client, if the lockspace
     * fails, kill_pids/client_using_pid would not find this pid (assuming
     * it doesn't already hold other tokens using the lockspace).  If
     * the lockspace failed while we were acquiring the tokens, kill_pids
     * has already run and not found us, so we must revert what we've done
     * in acquire.
     *
     * Warning:
     * We could deadlock if we hold cl->mutex and take spaces_mutex,
     * because all_pids_dead() and kill_pids() hold spaces_mutex and take
     * cl->mutex.  So, lock spaces_mutex first, then cl->mutex to avoid the
     * deadlock.
     *
     * Other approaches:
     * A solution may be to record in each sp all the pids/cis using it
     * prior to starting the acquire.  Then we would not need to do this
     * check here to see if the lockspace has been killed (if it was, the
     * pid for this ci would have been killed in kill_pids), and
     * all_pids_dead() and kill_pids() would not need to go through each cl
     * and each cl->token to check if it's using the sp (it would know by
     * just looking at sp->pids[] and killing each).
     */

 done:
    pthread_mutex_lock(&spaces_mutex);
    pthread_mutex_lock(&cl->mutex);
    log_debug("cmd_acquire %d,%d,%d result %d pid_dead %d",
              cl_ci, cl_fd, cl_pid, result, cl->pid_dead);

    pid_dead = cl->pid_dead;
    cl->cmd_active = 0;

    if (!result && !pid_dead) {
        if (check_new_tokens_space(cl, new_tokens, new_tokens_count)) {
            /* case 1 becomes case 3 */
            log_error("cmd_acquire %d,%d,%d invalid lockspace",
                      cl_ci, cl_fd, cl_pid);
            result = -ENOSPC;
        }
    }

    /* 1. Success acquiring leases, and pid is live */

    if (!result && !pid_dead) {
        for (i = 0; i < new_tokens_count; i++) {
            for (j = 0; j < SANLK_MAX_RESOURCES; j++) {
                if (!cl->tokens[j]) {
                    cl->tokens[j] = new_tokens[i];
                    break;
                }
            }
        }
        /* goto reply after mutex unlock */
    }

    pthread_mutex_unlock(&cl->mutex);
    pthread_mutex_unlock(&spaces_mutex);

    /* 1. Success acquiring leases, and pid is live */

    if (!result && !pid_dead) {
        /* work done before mutex unlock */
        goto reply;
    }

    /* 2. Success acquiring leases, and pid is dead */

    if (!result && pid_dead) {
        release_new_tokens(task, new_tokens, alloc_count, acquire_count);
        release_cl_tokens(task, cl);
        client_free(cl_ci);
        result = -ENOTTY;
        goto reply;
    }

    /* 3. Failure acquiring leases, and pid is live */

    if (result && !pid_dead) {
        release_new_tokens(task, new_tokens, alloc_count, acquire_count);
        goto reply;
    }

    /* 4. Failure acquiring leases, and pid is dead */

    if (result && pid_dead) {
        release_new_tokens(task, new_tokens, alloc_count, acquire_count);
        release_cl_tokens(task, cl);
        client_free(cl_ci);
        goto reply;
    }

 reply:
    if (!recv_done)
        client_recv_all(ca->ci_in, &ca->header, pos);

    send_result(fd, &ca->header, result);
    client_resume(ca->ci_in);
}
void lldpctl_log_level(int level)
{
    if (level >= 1)
        log_level(level - 1);
}
void *ongoing_heartbeat_thread(apr_thread_t *th, void *data)
{
    mq_ongoing_t *on = (mq_ongoing_t *)data;
    apr_time_t timeout = apr_time_make(on->check_interval, 0);
    op_generic_t *gop;
    mq_msg_t *msg;
    ongoing_hb_t *oh;
    ongoing_table_t *table;
    apr_hash_index_t *hi, *hit;
    opque_t *q;
    char *id;
    mq_msg_hash_t *remote_hash;
    apr_time_t now;
    apr_ssize_t id_len;
    int n, k;
    char *remote_host_string;

    apr_thread_mutex_lock(on->lock);
    n = 0;
    do {
        now = apr_time_now() - apr_time_from_sec(5);  //** Give ourselves a little buffer
        log_printf(5, "Loop Start now=" TT "\n", apr_time_now());
        q = new_opque();
        // opque_start_execution(q);
        for (hit = apr_hash_first(NULL, on->table); hit != NULL; hit = apr_hash_next(hit)) {
            apr_hash_this(hit, (const void **)&remote_hash, &id_len, (void **)&table);

            k = apr_hash_count(table->table);
            if (log_level() > 1) {
                remote_host_string = mq_address_to_string(table->remote_host);
                log_printf(1, "host=%s count=%d\n", remote_host_string, k);
                free(remote_host_string);
            }

            for (hi = apr_hash_first(NULL, table->table); hi != NULL; hi = apr_hash_next(hi)) {
                apr_hash_this(hi, (const void **)&id, &id_len, (void **)&oh);

                log_printf(1, "id=%s now=" TT " next_check=" TT "\n", oh->id, apr_time_sec(apr_time_now()), apr_time_sec(oh->next_check));
                if (now > oh->next_check) {
                    log_printf(1, "id=%s sending HEARTBEAT EXEC SUBMIT nows=" TT " hb=%d\n", oh->id, apr_time_sec(apr_time_now()), oh->heartbeat);
                    flush_log();
                    //** Form the message
                    msg = mq_make_exec_core_msg(table->remote_host, 1);
                    mq_msg_append_mem(msg, ONGOING_KEY, ONGOING_SIZE, MQF_MSG_KEEP_DATA);
                    mq_msg_append_mem(msg, oh->id, oh->id_len, MQF_MSG_KEEP_DATA);
                    mq_msg_append_mem(msg, NULL, 0, MQF_MSG_KEEP_DATA);

                    //** Make the gop
                    gop = new_mq_op(on->mqc, msg, ongoing_response_status, NULL, NULL, oh->heartbeat);
                    gop_set_private(gop, table);
                    opque_add(q, gop);

                    oh->in_progress = 1; //** Flag it as in progress so it doesn't get deleted behind the scenes
                }
            }
        }
        log_printf(5, "Loop end now=" TT "\n", apr_time_now());

        //** Wait for it to complete
        apr_thread_mutex_unlock(on->lock);
        opque_waitall(q);
        apr_thread_mutex_lock(on->lock);

        //** Dec the counters
        while ((gop = opque_waitany(q)) != NULL) {
            log_printf(0, "gid=%d gotone status=%d now=" TT "\n", gop_id(gop), (gop_get_status(gop)).op_status, apr_time_sec(apr_time_now()));
            table = gop_get_private(gop);
            table->count--;

            //** Update the next check
            for (hi = apr_hash_first(NULL, table->table); hi != NULL; hi = apr_hash_next(hi)) {
                apr_hash_this(hi, (const void **)&id, &id_len, (void **)&oh);
                oh->next_check = apr_time_now() + apr_time_from_sec(oh->heartbeat);

                //** Check if we get rid of it
                oh->in_progress = 0;
                if (oh->count <= 0) { //** Need to delete it
                    apr_hash_set(table->table, id, id_len, NULL);
                    free(oh->id);
                    free(oh);
                }
            }

            gop_free(gop, OP_DESTROY);
        }
        opque_free(q, OP_DESTROY);

        now = apr_time_now();
        log_printf(2, "sleeping %d now=" TT "\n", on->check_interval, now);

        //** Sleep until time for the next heartbeat or time to exit
        if (on->shutdown == 0) apr_thread_cond_timedwait(on->cond, on->lock, timeout);
        n = on->shutdown;

        now = apr_time_now() - now;
        log_printf(2, "main loop bottom n=%d dt=" TT " sec=" TT "\n", n, now, apr_time_sec(now));
    } while (n == 0);

    log_printf(2, "CLEANUP\n");

    for (hit = apr_hash_first(NULL, on->table); hit != NULL; hit = apr_hash_next(hit)) {
        apr_hash_this(hit, (const void **)&remote_hash, &id_len, (void **)&table);

        for (hi = apr_hash_first(NULL, table->table); hi != NULL; hi = apr_hash_next(hi)) {
            apr_hash_this(hi, (const void **)&id, &id_len, (void **)&oh);
            apr_hash_set(table->table, id, id_len, NULL);
            free(oh->id);
            free(oh);
        }

        apr_hash_set(on->table, &(table->remote_host_hash), sizeof(mq_msg_hash_t), NULL);
        mq_msg_destroy(table->remote_host);
        free(table);
    }

    log_printf(2, "EXITING\n");

    apr_thread_mutex_unlock(on->lock);

    return(NULL);
}
inline int log_info_enable() { return log_level_compare(log_level(0), _LOG_INFO); }
inline int log_warn_enable() { return log_level_compare(log_level(0), _LOG_WARN); }
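/* Hedged usage sketch for the log_*_enable() helpers above: test the level
 * once, then do the expensive formatting only when it will actually be
 * printed.  build_state_dump() and log_msg() are hypothetical stand-ins. */
static void maybe_log_state(void)
{
    if (log_debug_enable()) {
        char *dump = build_state_dump();  /* expensive; only runs when debug is on */
        log_msg(_LOG_DEBUG, "state: %s", dump);
        free(dump);
    }
}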