// Probe one ibv device for HW-timestamp conversion capabilities.
// Returns a bitmask of:
//   IBV_EXP_QUERY_DEVICE_SUPPORTED - device reports a non-zero hca_core_clock
//   IBV_EXP_QUERY_VALUES_SUPPORTED - device can report its current HW clock value
// Without DEFINED_IBV_EXP_CQ_TIMESTAMP support compiled in, always returns 0.
uint32_t ib_ctx_time_converter::get_device_convertor_status(struct ibv_context* ctx) {
	uint32_t dev_status = 0;
#ifdef DEFINED_IBV_EXP_CQ_TIMESTAMP
	int rval;

	// Checking if ibv_exp_query_device() is valid
	struct ibv_exp_device_attr device_attr;
	memset(&device_attr, 0, sizeof(device_attr));
	device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_WITH_HCA_CORE_CLOCK;
	// Failure OR a zero core-clock frequency both mean "cannot convert raw timestamps".
	if ((rval = ibv_exp_query_device(ctx ,&device_attr)) || !device_attr.hca_core_clock) {
		vlog_printf(VLOG_DEBUG, "ib_ctx_time_converter::get_device_convertor_status :Error in querying hca core clock "
			"(ibv_exp_query_device() return value=%d ) (ibv context %p) (errno=%d %m)\n",
			rval, ctx, errno);
	} else {
		dev_status |= IBV_EXP_QUERY_DEVICE_SUPPORTED;
	}

	// Checking if ibv_exp_query_values() is valid
	struct ibv_exp_values queried_values;
	memset(&queried_values, 0, sizeof(queried_values));
	queried_values.comp_mask = IBV_EXP_VALUES_HW_CLOCK;
	// A readable HW clock is required to sync HW time to system time.
	if ((rval = ibv_exp_query_values(ctx,IBV_EXP_VALUES_HW_CLOCK, &queried_values)) || !queried_values.hwclock) {
		vlog_printf(VLOG_DEBUG, "ib_ctx_time_converter::get_device_convertor_status :Error in querying hw clock, can't convert"
			" hw time to system time (ibv_exp_query_values() return value=%d ) (ibv context %p) (errno=%d %m)\n",
			rval, ctx, errno);
	} else {
		dev_status |= IBV_EXP_QUERY_VALUES_SUPPORTED;
	}
#else
	NOT_IN_USE(ctx);
#endif
	return dev_status;
}
// Dump statistics for a single fd, or for every fd in the collection when
// fd == 0, framed by separator banner lines.
void fd_collection::statistics_print(int fd, vlog_levels_t log_level)
{
	vlog_printf(log_level, "==================================================\n");
	if (!fd) {
		// fd 0 acts as a wildcard: walk the whole fd map.
		vlog_printf(log_level, "======= DUMPING STATISTICS FOR ALL OPEN FDS ======\n");
		const int map_size = g_p_fd_collection->get_fd_map_size();
		for (int cur_fd = 0; cur_fd < map_size; ++cur_fd) {
			g_p_fd_collection->statistics_print_helper(cur_fd, log_level);
		}
	} else {
		vlog_printf(log_level, "============ DUMPING FD %d STATISTICS ============\n", fd);
		g_p_fd_collection->statistics_print_helper(fd, log_level);
	}
	vlog_printf(log_level, "==================================================\n");
}
void buffer_pool::buffersPanic() { if (isCircle(m_p_head)) { __log_info_err("Circle was found in buffer_pool"); // print mu & lambda of circle Floyd_LogCircleInfo(m_p_head); } else { __log_info_info("no circle was found in buffer_pool"); } // log backtrace const int MAX_BACKTRACE = 25; char **symbols; void *addresses[MAX_BACKTRACE]; int count = backtrace(addresses, MAX_BACKTRACE); symbols = backtrace_symbols(addresses, count); for (int i = 0; i < count; ++i) { vlog_printf(VLOG_ERROR, " %2d %s\n", i, symbols[i]); } __log_info_panic("m_n_buffers(%lu) > m_n_buffers_created(%lu)", m_n_buffers, m_n_buffers_created); }
/*
 * Variadic convenience wrapper: collects its arguments into a va_list and
 * forwards them to vlog_printf() at priority 0 (vlog_printf() takes a
 * va_list as its last argument, matching the other log wrappers here).
 */
static void xx_printf(char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vlog_printf(0, fmt, ap);
	va_end(ap);
}
/*
 * Log a message labeled with this chap instance's ppp id and authentication
 * protocol name.  The label is prepended to the caller's format string and
 * the whole thing is handed to vlog_printf() with the caller's va_list.
 */
static void chap_log(chap *_this, uint32_t prio, const char *fmt, ...)
{
	char buf[BUFSIZ];
	const char *proto;
	va_list args;

	CHAP_ASSERT(_this != NULL);
	CHAP_ASSERT(_this->ppp != NULL);

	/* Map the auth type to its display name. */
	if (_this->type == PPP_AUTH_CHAP_MD5)
		proto = "chap";
	else if (_this->type == PPP_AUTH_CHAP_MS_V2)
		proto = "mschap_v2";
	else
		proto = "unknown";

	va_start(args, fmt);
	snprintf(buf, sizeof(buf), "ppp id=%u layer=chap proto=%s %s",
	    _this->ppp->id, proto, fmt);
	vlog_printf(prio, buf, args);
	va_end(args);
}
// Print this socket's basic statistics: its fd number and, if it is
// registered in an epoll context, the epoll fd and registered event flags.
void socket_fd_api::statistics_print(vlog_levels_t log_level /* = VLOG_DEBUG */)
{
	epoll_fd_rec fd_rec;  // renamed from the shadowing "epoll_fd_rec epoll_fd_rec"
	const int epoll_fd = socket_fd_api::get_epoll_context_fd();

	// Prepare data: fetch this fd's registration record from its epoll context.
	if (epoll_fd) {
		m_econtext->get_fd_rec_by_fd(m_fd, fd_rec);
	}

	// Socket data
	vlog_printf(log_level, "Fd number : %d\n", m_fd);
	if (epoll_fd) {
		vlog_printf(log_level, "Socket epoll Fd : %d\n", epoll_fd);
		vlog_printf(log_level, "Socket epoll flags : 0x%x\n", fd_rec.events);
	}
}
// Tear down the shared-memory statistics area created by vma_shmem_stats_open():
// unmap the region, close the backing fd, and (in the original/parent process
// only -- not a forked child) unlink the stats file.
// NOTE(review): this definition is truncated in this view; the trailing
// "else if" branch continues beyond the visible source.
void vma_shmem_stats_close() {
	if (g_sh_mem_info.p_sh_stats && g_sh_mem_info.p_sh_stats != MAP_FAILED) {
		vlog_printf(VLOG_DEBUG, "%s: file '%s' fd %d shared memory at %p with %d max blocks\n", __func__, g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats, mce_sys.stats_fd_num_max);
		BULLSEYE_EXCLUDE_BLOCK_START
		if (munmap(g_sh_mem_info.p_sh_stats, SHMEM_STATS_SIZE(mce_sys.stats_fd_num_max)) != 0) {
			vlog_printf(VLOG_ERROR, "%s: file [%s] fd [%d] error while unmap shared memory at [%p]\n", __func__, g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats);
		}
		BULLSEYE_EXCLUDE_BLOCK_END
		g_sh_mem_info.p_sh_stats = MAP_FAILED;
		if (g_sh_mem_info.fd_sh_stats) close(g_sh_mem_info.fd_sh_stats);
		// A forked child must not unlink the parent's stats file.
		if(!g_is_forked_child) unlink(g_sh_mem_info.filename_sh_stats);
	} else if (g_sh_mem_info.p_sh_stats != MAP_FAILED) {
/*
 * Log a message labeled with the owning ppp session's id and the mppe layer
 * tag, forwarding the caller's variadic arguments to vlog_printf().
 */
static void mppe_log(mppe *_this, uint32_t prio, const char *fmt, ...)
{
	va_list args;
	char labeled_fmt[BUFSIZ];

	va_start(args, fmt);
	snprintf(labeled_fmt, sizeof(labeled_fmt), "ppp id=%u layer=mppe %s",
	    _this->ppp->id, fmt);
	vlog_printf(prio, labeled_fmt, args);
	va_end(args);
}
void fd_collection::statistics_print_helper(int fd, vlog_levels_t log_level) { socket_fd_api* socket_fd; epfd_info* epoll_fd; if ((socket_fd = get_sockfd(fd))) { vlog_printf(log_level, "==================== SOCKET FD ===================\n"); socket_fd->statistics_print(log_level); goto found_fd; } if ((epoll_fd = get_epfd(fd))) { vlog_printf(log_level, "==================== EPOLL FD ====================\n"); epoll_fd->statistics_print(log_level); goto found_fd; } return; found_fd: vlog_printf(log_level, "==================================================\n"); }
void fifo_test() { sm_fifo my_fifo; int i=0; fifo_entry_t ret; test_entry arr_num[] = { {1, "one"}, {2, "two"}, {3, "three"}, {4, "four"}, {5, "five"}, {6, "six"}, {7, "seven"}, {8, "eight"}, {9, "nine"}, {10,"ten"} }; vlog_printf(VLOG_INFO, "fifo test\n"); while (i<10) { my_fifo.push_back(arr_num[i].event, (void *) arr_num[i].name ); vlog_printf(VLOG_ERROR, "element %d was inserted\n", arr_num[i]); my_fifo.debug_print_fifo(); ret = my_fifo.get_front(); vlog_printf(VLOG_ERROR, "element %d was removed (%s)\n", ret.event, ret.ev_data); my_fifo.debug_print_fifo(); i++; } /*while (i>0) { ret = my_fifo.get_element(); vlog_printf(VLOG_ERROR, "element %d was removeded\n", ret); my_fifo.debug_print_fifo(); i--; }*/ }
// One-shot check (guarded by a function-local static) that the mlx4 driver's
// log_num_mgm_entry_size module parameter enables flow steering.  Flow
// steering is considered enabled when the parameter reads as a negative odd
// value (e.g. "-1"); otherwise a multi-line warning banner is printed with
// instructions for enabling it.
void check_flow_steering_log_num_mgm_entry_size() {
	// Run the (file-reading) check only once per process.
	static bool checked_mlx4_steering = false;
	if (checked_mlx4_steering) {
		return;
	}
	checked_mlx4_steering = true;
	char flow_steering_val[4] = {0};
	if (priv_safe_try_read_file((const char*)FLOW_STEERING_MGM_ENTRY_SIZE_PARAM_FILE, flow_steering_val, sizeof(flow_steering_val)) == -1) {
		// Parameter file absent: older OFED without this option -- debug only.
		vlog_printf(VLOG_DEBUG, "Flow steering option for mlx4 driver does not exist in current OFED version\n");
	} else if (flow_steering_val[0] != '-' || (strtol(&flow_steering_val[1], NULL, 0) % 2) == 0) {
		// Not negative, or negative-even magnitude => flow steering disabled.
		vlog_printf(VLOG_WARNING, "***************************************************************************************\n");
		vlog_printf(VLOG_WARNING, "* VMA will not operate properly while flow steering option is disabled *\n");
		vlog_printf(VLOG_WARNING, "* In order to enable flow steering please restart your VMA applications after running *\n");
		vlog_printf(VLOG_WARNING, "* the following: *\n");
		vlog_printf(VLOG_WARNING, "* For your information the following steps will restart your network interface *\n");
		vlog_printf(VLOG_WARNING, "* 1. \"echo options mlx4_core log_num_mgm_entry_size=-1 > /etc/modprobe.d/mlnx.conf\" *\n");
		vlog_printf(VLOG_WARNING, "* 2. Restart openibd or rdma service depending on your system configuration *\n");
		vlog_printf(VLOG_WARNING, "* Read more about the Flow Steering support in the VMA's User Manual *\n");
		vlog_printf(VLOG_WARNING, "***************************************************************************************\n");
	}
}
/* Log a message labeled with this interface's name; returns vlog_printf()'s status. */
static int npppd_iface_log(npppd_iface *_this, int prio, const char *fmt, ...)
{
	va_list args;
	int rv;
	char labeled_fmt[BUFSIZ];

	NPPPD_IFACE_ASSERT(_this != NULL);

	va_start(args, fmt);
	snprintf(labeled_fmt, sizeof(labeled_fmt), "%s %s", _this->ifname, fmt);
	rv = vlog_printf(prio, labeled_fmt, args);
	va_end(args);

	return rv;
}
/*
 * Log a message labeled with this pptpd instance.
 * BUGFIX: the conditional was "#ifdef PPTPD_MULITPLE" (typo), so the
 * id-bearing label branch could never be compiled in.  Corrected to
 * PPTPD_MULTIPLE, matching the macro spelling used by pptp_call_log().
 */
static void pptpd_log(pptpd *_this, int prio, const char *fmt, ...)
{
	char logbuf[BUFSIZ];
	va_list ap;

	PPTPD_ASSERT(_this != NULL);
	va_start(ap, fmt);
#ifdef PPTPD_MULTIPLE
	snprintf(logbuf, sizeof(logbuf), "pptpd id=%u %s", _this->id, fmt);
#else
	snprintf(logbuf, sizeof(logbuf), "pptpd %s", fmt);
#endif
	vlog_printf(prio, logbuf, ap);
	va_end(ap);
}
/* Log a message labeled with this l2tp control connection's identifiers. */
void l2tp_ctrl_log(l2tp_ctrl *_this, int prio, const char *fmt, ...)
{
	va_list args;
	char labeled_fmt[BUFSIZ];

	va_start(args, fmt);
#ifdef L2TPD_MULTIPLE
	snprintf(labeled_fmt, sizeof(labeled_fmt), "l2tpd id=%u ctrl=%u %s",
	    _this->l2tpd->id, _this->id, fmt);
#else
	snprintf(labeled_fmt, sizeof(labeled_fmt), "l2tpd ctrl=%u %s",
	    _this->id, fmt);
#endif
	vlog_printf(prio, labeled_fmt, args);
	va_end(args);
}
/* Log a message labeled with this PPTP call's (and its control connection's) ids. */
static void pptp_call_log(pptp_call *_this, int prio, const char *fmt, ...)
{
	va_list args;
	char labeled_fmt[BUFSIZ];

	va_start(args, fmt);
#ifdef PPTPD_MULTIPLE
	snprintf(labeled_fmt, sizeof(labeled_fmt), "pptpd id=%u ctrl=%u call=%u %s",
	    _this->ctrl->pptpd->id, _this->ctrl->id, _this->id, fmt);
#else
	snprintf(labeled_fmt, sizeof(labeled_fmt), "pptpd ctrl=%u call=%u %s",
	    _this->ctrl->id, _this->id, fmt);
#endif
	vlog_printf(prio, labeled_fmt, args);
	va_end(args);
}
/* Log a message labeled with this ppp session's id at layer=base; returns vlog_printf()'s status. */
int ppp_log(npppd_ppp *_this, int prio, const char *fmt, ...)
{
	va_list args;
	int rv;
	char labeled_fmt[BUFSIZ];

	PPP_ASSERT(_this != NULL);

	va_start(args, fmt);
	snprintf(labeled_fmt, sizeof(labeled_fmt), "ppp id=%u layer=base %s",
	    _this->id, fmt);
	rv = vlog_printf(prio, labeled_fmt, args);
	va_end(args);

	return rv;
}
/*
 * Log a message labeled with this PPPoE session's identity (daemon id or
 * listening interface name, plus session id).
 */
static void pppoe_session_log(pppoe_session *_this, int prio, const char *fmt, ...)
{
	va_list args;
	char labeled_fmt[BUFSIZ];

	PPPOE_SESSION_ASSERT(_this != NULL);

	va_start(args, fmt);
#ifdef PPPOED_MULTIPLE
	snprintf(labeled_fmt, sizeof(labeled_fmt), "pppoed id=%u session=%d %s",
	    _this->pppoed->id, _this->session_id, fmt);
#else
	snprintf(labeled_fmt, sizeof(labeled_fmt), "pppoed if=%s session=%d %s",
	    pppoe_session_listen_ifname(_this), _this->session_id, fmt);
#endif
	vlog_printf(prio, labeled_fmt, args);
	va_end(args);
}
// full version of Floyd's cycle-finding algorithm // see: http://en.wikipedia.org/wiki/Cycle_detection#Tortoise_and_hare void Floyd_LogCircleInfo(Node x0) { // The main phase of the algorithm, finding a repetition x_mu = x_2mu // The hare moves twice as quickly as the tortoise Node tortoise = f(x0); // f(x0) is the element/node next to x0. Node hare = f(f(x0)); while (tortoise != hare) { tortoise = f(tortoise); hare = f(f(hare)); } // at this point tortoise position is equvi-distant from x0 // and current hare position (which is the same as tortoise position). This is // true because tortoise moved exactly half of the hare way. // so hare (set to tortoise-current position and move at tortoise speed) moving in // circle and tortoise (set to x0 ) moving towards circle, must meet at // current hare position (== current turtle position). Realize that they move // in same speed, the first intersection will be the beginning of the circle. // // Find the position of the first repetition of length mu // The hare and tortoise move at the same speeds int mu = 0; // first index that starts the circle hare = tortoise; tortoise = x0; const int MAX_STEPS = 1 << 24; // = 16M while (tortoise != hare) { tortoise = f(tortoise); hare = f(hare); mu++; if (mu > MAX_STEPS) break; // extra safety; not really needed } // Find the length of the shortest cycle starting from x_mu // The hare moves while the tortoise stays still int lambda = 1; //circle length hare = f(tortoise); while (tortoise != hare) { hare = f(hare); lambda++; if (lambda > MAX_STEPS) break; // extra safety; not really needed } vlog_printf (VLOG_ERROR, "circle first index (mu) = %d, circle length (lambda) = %d", mu, lambda); }
// Resolve the effective RX UDP HW timestamp conversion mode for a set of
// devices: AND together each device's capability bits (so the result reflects
// what ALL devices support), then reconcile that with the user-requested mode
// from safe_mce_sys().rx_udp_hw_ts_conversion, downgrading (SYNC -> RAW ->
// DISABLE) where capabilities fall short.
ts_conversion_mode_t ib_ctx_time_converter::get_devices_convertor_status(struct ibv_context** ibv_context_list, int num_devices) {
	ts_conversion_mode_t ctx_time_conversion_mode;
	vlog_printf(VLOG_DEBUG, "ib_ctx_time_converter::get_devices_convertor_status : Checking RX UDP HW time stamp "
		"status for all devices [%d], ibv_context_list = %p\n", num_devices, ibv_context_list);
	uint32_t devs_status = 0;
#ifdef DEFINED_IBV_EXP_CQ_TIMESTAMP
	if (safe_mce_sys().rx_udp_hw_ts_conversion != TS_CONVERSION_MODE_DISABLE){
		// Start from "everything supported" and strip any capability that
		// some device lacks.
		devs_status = IBV_EXP_QUERY_DEVICE_SUPPORTED | IBV_EXP_QUERY_VALUES_SUPPORTED;
		for (int i = 0; i < num_devices; i++) {
			devs_status &= get_device_convertor_status(ibv_context_list[i]);
		}
	}
#endif
	switch (safe_mce_sys().rx_udp_hw_ts_conversion) {
	case TS_CONVERSION_MODE_RAW:
		// RAW only needs the device query (core clock) capability.
		ctx_time_conversion_mode = devs_status & IBV_EXP_QUERY_DEVICE_SUPPORTED ? TS_CONVERSION_MODE_RAW : TS_CONVERSION_MODE_DISABLE;
		break;
	case TS_CONVERSION_MODE_BEST_POSSIBLE:
		// Prefer SYNC when both capabilities exist; otherwise fall back to
		// RAW or, failing that, DISABLE.
		if (devs_status == (IBV_EXP_QUERY_DEVICE_SUPPORTED | IBV_EXP_QUERY_VALUES_SUPPORTED)) {
			ctx_time_conversion_mode = TS_CONVERSION_MODE_SYNC;
		} else {
			ctx_time_conversion_mode = devs_status & IBV_EXP_QUERY_DEVICE_SUPPORTED ? TS_CONVERSION_MODE_RAW : TS_CONVERSION_MODE_DISABLE;
		}
		break;
	case TS_CONVERSION_MODE_SYNC:
		// SYNC is all-or-nothing: both capabilities on every device.
		ctx_time_conversion_mode = devs_status == (IBV_EXP_QUERY_DEVICE_SUPPORTED | IBV_EXP_QUERY_VALUES_SUPPORTED) ? TS_CONVERSION_MODE_SYNC : TS_CONVERSION_MODE_DISABLE;
		break;
	default:
		ctx_time_conversion_mode = TS_CONVERSION_MODE_DISABLE;
		break;
	}
	return ctx_time_conversion_mode;
}
void ib_ctx_handler_collection::update_tbl(const char *ifa_name) { struct ibv_device **dev_list = NULL; ib_ctx_handler * p_ib_ctx_handler = NULL; int num_devices = 0; int i; ibchc_logdbg("Checking for offload capable IB devices..."); dev_list = vma_ibv_get_device_list(&num_devices); BULLSEYE_EXCLUDE_BLOCK_START if (!dev_list) { ibchc_logerr("Failure in vma_ibv_get_device_list() (error=%d %m)", errno); ibchc_logerr("Please check rdma configuration"); throw_vma_exception("No IB capable devices found!"); } if (!num_devices) { vlog_levels_t _level = ifa_name ? VLOG_DEBUG : VLOG_ERROR; // Print an error only during initialization. vlog_printf(_level, "VMA does not detect IB capable devices\n"); vlog_printf(_level, "No performance gain is expected in current configuration\n"); } BULLSEYE_EXCLUDE_BLOCK_END for (i = 0; i < num_devices; i++) { struct ib_ctx_handler::ib_ctx_handler_desc desc = {dev_list[i]}; /* 2. Skip existing devices (compare by name) */ if (ifa_name && !check_device_name_ib_name(ifa_name, dev_list[i]->name)) { continue; } if (ib_ctx_handler::is_mlx4(dev_list[i]->name)) { // Note: mlx4 does not support this capability. if(safe_mce_sys().enable_socketxtreme) { ibchc_logdbg("Blocking offload: mlx4 interfaces in socketxtreme mode"); continue; } // Check if mlx4 steering creation is supported. // Those setting are passed to the VM by the Hypervisor - NO NEED to specify the param on the VM. if (safe_mce_sys().hypervisor == mce_sys_var::HYPER_NONE) { check_flow_steering_log_num_mgm_entry_size(); } } /* 3. Add new ib devices */ p_ib_ctx_handler = new ib_ctx_handler(&desc); if (!p_ib_ctx_handler) { ibchc_logerr("failed allocating new ib_ctx_handler (errno=%d %m)", errno); continue; } m_ib_ctx_map[p_ib_ctx_handler->get_ibv_device()] = p_ib_ctx_handler; } ibchc_logdbg("Check completed. Found %d offload capable IB devices", m_ib_ctx_map.size()); if (dev_list) { ibv_free_device_list(dev_list); } }
void vma_shmem_stats_open(uint8_t** p_p_vma_log_level, uint8_t** p_p_vma_log_details) { void* buf; int ret; size_t shmem_size = 0; mode_t saved_mode; g_p_stats_data_reader = new stats_data_reader(); BULLSEYE_EXCLUDE_BLOCK_START if ( NULL == g_p_stats_data_reader ) { vlog_printf(VLOG_ERROR,"%s:%d: Can't allocate g_p_stats_data_reader \n", __func__, __LINE__); goto shmem_error; } BULLSEYE_EXCLUDE_BLOCK_END g_sh_mem_info.filename_sh_stats[0] = '\0'; g_sh_mem_info.p_sh_stats = MAP_FAILED; sprintf(g_sh_mem_info.filename_sh_stats, "/tmp/vmastat.%d", getpid()); saved_mode = umask(0); g_sh_mem_info.fd_sh_stats = open(g_sh_mem_info.filename_sh_stats, O_CREAT|O_RDWR, S_IRWXU | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); umask(saved_mode); BULLSEYE_EXCLUDE_BLOCK_START if (g_sh_mem_info.fd_sh_stats < 0) { vlog_printf(VLOG_ERROR, "%s: Could not open %s %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno); goto shmem_error; } BULLSEYE_EXCLUDE_BLOCK_END shmem_size = SHMEM_STATS_SIZE(mce_sys.stats_fd_num_max); buf = malloc(shmem_size); memset(buf, 0, shmem_size); ret = write(g_sh_mem_info.fd_sh_stats, buf, shmem_size); free(buf); BULLSEYE_EXCLUDE_BLOCK_START if (ret < 0) { vlog_printf(VLOG_ERROR, "%s: Could not write to %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno); goto shmem_error; } BULLSEYE_EXCLUDE_BLOCK_END g_sh_mem_info.p_sh_stats = mmap(0, shmem_size, PROT_WRITE|PROT_READ, MAP_SHARED, g_sh_mem_info.fd_sh_stats, 0); BULLSEYE_EXCLUDE_BLOCK_START if (g_sh_mem_info.p_sh_stats == MAP_FAILED) { vlog_printf(VLOG_ERROR, "%s: MAP_FAILED for %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats); goto shmem_error; } BULLSEYE_EXCLUDE_BLOCK_END MAP_SH_MEM(g_sh_mem, g_sh_mem_info.p_sh_stats); write_version_details_to_shmem(&g_sh_mem->ver_info); g_sh_mem->max_skt_inst_num = mce_sys.stats_fd_num_max; g_sh_mem->reader_counter = 0; vlog_printf(VLOG_DEBUG, "%s: file '%s' fd %d shared memory at %p with %d max blocks\n", __func__, g_sh_mem_info.filename_sh_stats, 
g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats, mce_sys.stats_fd_num_max); // Update the shmem initial log values g_sh_mem->log_level = **p_p_vma_log_level; g_sh_mem->log_details_level = **p_p_vma_log_details; // ReMap internal log level to ShMem area *p_p_vma_log_level = &g_sh_mem->log_level; *p_p_vma_log_details = &g_sh_mem->log_details_level; g_p_stats_data_reader->register_to_timer(); return; shmem_error: BULLSEYE_EXCLUDE_BLOCK_START if (g_sh_mem_info.fd_sh_stats > 0) { close(g_sh_mem_info.fd_sh_stats); unlink(g_sh_mem_info.filename_sh_stats); } g_sh_mem_info.fd_sh_stats = -1; g_sh_mem_info.p_sh_stats = MAP_FAILED; g_sh_mem = &g_local_sh_mem; memset((void*)g_sh_mem, 0, sizeof(sh_mem_t)); *p_p_vma_log_level = &g_sh_mem->log_level; *p_p_vma_log_details = &g_sh_mem->log_details_level; BULLSEYE_EXCLUDE_BLOCK_END }
// Open the VMA statistics area and redirect the caller's log-level /
// log-details pointers into it.  Preferred backing is an mmap'ed file under
// stats_shmem_dirname; if the directory is unset or any file/map step fails,
// a heap buffer (no_shmem path) backs the stats instead, and only an early
// allocation failure (shmem_error path) falls back to the static local area.
void vma_shmem_stats_open(vlog_levels_t** p_p_vma_log_level, uint8_t** p_p_vma_log_details)
{
	void *buf = NULL;
	void *p_shmem = NULL;
	int ret;
	size_t shmem_size = 0;
	mode_t saved_mode;

	g_p_stats_data_reader = new stats_data_reader();
	BULLSEYE_EXCLUDE_BLOCK_START
	if (NULL == g_p_stats_data_reader) {
		vlog_printf(VLOG_ERROR,"%s:%d: Can't allocate g_p_stats_data_reader \n", __func__, __LINE__);
		goto shmem_error;
	}
	BULLSEYE_EXCLUDE_BLOCK_END

	// Heap image of the stats area: used to size the file, and kept as the
	// backing store itself on the no_shmem fallback path.
	shmem_size = SHMEM_STATS_SIZE(safe_mce_sys().stats_fd_num_max);
	buf = malloc(shmem_size);
	if (buf == NULL)
		goto shmem_error;
	memset(buf, 0, shmem_size);

	p_shmem = buf;

	// Empty dirname means shared-memory stats are disabled by configuration.
	if (strlen(safe_mce_sys().stats_shmem_dirname) <= 0)
		goto no_shmem;

	g_sh_mem_info.filename_sh_stats[0] = '\0';
	g_sh_mem_info.p_sh_stats = MAP_FAILED;
	ret = snprintf(g_sh_mem_info.filename_sh_stats, sizeof(g_sh_mem_info.filename_sh_stats), "%s/vmastat.%d", safe_mce_sys().stats_shmem_dirname, getpid());
	// Reject truncated or failed filename formatting.
	if (!((0 < ret) && (ret < (int)sizeof(g_sh_mem_info.filename_sh_stats)))) {
		vlog_printf(VLOG_ERROR, "%s: Could not create file under %s %m\n", __func__, safe_mce_sys().stats_shmem_dirname, errno);
		goto no_shmem;
	}
	// Create the file readable by group/other so the stats reader can attach.
	saved_mode = umask(0);
	g_sh_mem_info.fd_sh_stats = open(g_sh_mem_info.filename_sh_stats, O_CREAT|O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
	umask(saved_mode);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (g_sh_mem_info.fd_sh_stats < 0) {
		vlog_printf(VLOG_ERROR, "%s: Could not open %s %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	// Size the file by writing the zeroed image, then map it.
	ret = write(g_sh_mem_info.fd_sh_stats, buf, shmem_size);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (ret < 0) {
		vlog_printf(VLOG_ERROR, "%s: Could not write to %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats, errno);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	g_sh_mem_info.p_sh_stats = mmap(0, shmem_size, PROT_WRITE|PROT_READ, MAP_SHARED, g_sh_mem_info.fd_sh_stats, 0);
	BULLSEYE_EXCLUDE_BLOCK_START
	if (g_sh_mem_info.p_sh_stats == MAP_FAILED) {
		vlog_printf(VLOG_ERROR, "%s: MAP_FAILED for %s - %m\n", __func__, g_sh_mem_info.filename_sh_stats);
		goto no_shmem;
	}
	BULLSEYE_EXCLUDE_BLOCK_END
	// Mapping succeeded: switch the backing store to the mapping and drop the heap image.
	p_shmem = g_sh_mem_info.p_sh_stats;
	free(buf);
	buf = NULL;
	goto success;

no_shmem:
	// File-backed path failed (or is disabled): clean up any partially
	// created file and keep using the heap buffer as p_shmem.
	if (g_sh_mem_info.p_sh_stats == MAP_FAILED) {
		if (g_sh_mem_info.fd_sh_stats > 0) {
			close(g_sh_mem_info.fd_sh_stats);
			unlink(g_sh_mem_info.filename_sh_stats);
		}
	}
	g_sh_mem_info.p_sh_stats = 0;

success:
	MAP_SH_MEM(g_sh_mem, p_shmem);

	write_version_details_to_shmem(&g_sh_mem->ver_info);
	memcpy(g_sh_mem->stats_protocol_ver, STATS_PROTOCOL_VER, min(sizeof(g_sh_mem->stats_protocol_ver), sizeof(STATS_PROTOCOL_VER)));
	g_sh_mem->max_skt_inst_num = safe_mce_sys().stats_fd_num_max;
	g_sh_mem->reader_counter = 0;
	__log_dbg("file '%s' fd %d shared memory at %p with %d max blocks\n", g_sh_mem_info.filename_sh_stats, g_sh_mem_info.fd_sh_stats, g_sh_mem_info.p_sh_stats, safe_mce_sys().stats_fd_num_max);

	// Update the shmem initial log values
	g_sh_mem->log_level = **p_p_vma_log_level;
	g_sh_mem->log_details_level = **p_p_vma_log_details;

	// Update the shmem with initial fd dump values
	g_sh_mem->fd_dump = STATS_FD_STATISTICS_DISABLED;
	g_sh_mem->fd_dump_log_level = STATS_FD_STATISTICS_LOG_LEVEL_DEFAULT;

	// ReMap internal log level to ShMem area
	*p_p_vma_log_level = &g_sh_mem->log_level;
	*p_p_vma_log_details = &g_sh_mem->log_details_level;

	g_p_stats_data_reader->register_to_timer();
	return;

shmem_error:
	// Early allocation failure: point everything at the static local area so
	// the remapped log pointers remain valid.
	BULLSEYE_EXCLUDE_BLOCK_START
	g_sh_mem_info.fd_sh_stats = -1;
	g_sh_mem_info.p_sh_stats = MAP_FAILED;
	g_sh_mem = &g_local_sh_mem;
	g_sh_mem->reset();
	*p_p_vma_log_level = &g_sh_mem->log_level;
	*p_p_vma_log_details = &g_sh_mem->log_details_level;
	BULLSEYE_EXCLUDE_BLOCK_END
}