/*
 * ipath_for_usednames -- intern an ipath for a list of names, with all
 * instance numbers zeroed
 */
struct ipath *
ipath_for_usednames(struct node *np)
{
	struct ipath *ret, *ipp;
	int i = 0;
	struct node *np2;

	/* count the names, then allocate the array plus its NULL terminator */
	for (np2 = np; np2 != NULL; np2 = np2->u.name.next)
		i++;
	ret = MALLOC(sizeof (*ret) * (i + 1));
	for (i = 0, np2 = np; np2 != NULL; np2 = np2->u.name.next) {
		ret[i].s = np2->u.name.s;
		ret[i++].i = 0;
	}
	ret[i].s = NULL;

	/* if an equal path is already cached, discard ours and reuse it */
	if ((ipp = lut_lookup(Ipaths, (void *)ret,
	    (lut_cmp)ipath_cmp)) != NULL) {
		FREE(ret);
		return (ipp);
	}
	Ipaths = lut_add(Ipaths, (void *)ret, (void *)ret, (lut_cmp)ipath_cmp);
	stats_counter_bump(Nipath);
	stats_counter_add(Nbytes, (i + 1) * sizeof (struct ipath));
	return (ret);
}
/*
 * ipath_dummy -- return ipp if its last component matches np's last name,
 * otherwise intern a one-component path for that name
 */
struct ipath *
ipath_dummy(struct node *np, struct ipath *ipp)
{
	struct ipath *ret;

	ret = ipp;
	/* walk to the last component of the cached path */
	while (ipp[1].s != NULL)
		ipp++;
	if (strcmp(ipp[0].s, np->u.name.last->u.name.s) == 0)
		return (ret);

	/* build a one-component path for np's last name */
	ret = MALLOC(sizeof (*ret) * 2);
	ret[0].s = np->u.name.last->u.name.s;
	ret[0].i = 0;
	ret[1].s = NULL;
	if ((ipp = lut_lookup(Ipaths, (void *)ret,
	    (lut_cmp)ipath_cmp)) != NULL) {
		FREE(ret);
		return (ipp);
	}
	Ipaths = lut_add(Ipaths, (void *)ret, (void *)ret, (lut_cmp)ipath_cmp);
	stats_counter_bump(Nipath);
	stats_counter_add(Nbytes, 2 * sizeof (struct ipath));
	return (ret);
}
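/*
 * The allocate/probe/free-on-hit sequence in the two functions above is a
 * classic interning pattern: equal paths end up sharing a single cached
 * struct ipath array, so later equality checks can be plain pointer
 * comparisons and ipath_cmp() only ever runs at intern time.  Below is a
 * minimal, self-contained sketch of the same idea; it uses a hypothetical
 * linear-scan table in place of the real lut_* balanced tree, and none of
 * its names come from this file.
 */
#include <stdlib.h>
#include <string.h>

#define	CACHE_MAX	128
static const char *cache[CACHE_MAX];	/* hypothetical stand-in for Ipaths */
static int cache_len;

static const char *
intern(const char *s)
{
	char *candidate = strdup(s);	/* build the candidate entry first */
	int i;

	for (i = 0; i < cache_len; i++) {
		if (strcmp(cache[i], candidate) == 0) {
			free(candidate);	/* hit: discard the candidate */
			return (cache[i]);
		}
	}
	/* miss: the candidate becomes the cached entry
	 * (bounds check omitted for brevity in this sketch) */
	cache[cache_len++] = candidate;
	return (candidate);
}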
/*
 * log_queue_fifo_rewind_backlog_all:
 *
 * Move items on our backlog back to our qoverflow queue. Please note that
 * this function does not really care about the qoverflow size; it has to
 * put the backlog somewhere. The backlog is emptied here, as it will be
 * refilled if we send the items again.
 *
 * NOTE: this is assumed to be called from the output thread.
 */
static void
log_queue_fifo_rewind_backlog_all(LogQueue *s)
{
  LogQueueFifo *self = (LogQueueFifo *) s;

  iv_list_splice_tail_init(&self->qbacklog, &self->qoverflow_output);
  self->qoverflow_output_len += self->qbacklog_len;
  stats_counter_add(self->super.stored_messages, self->qbacklog_len);
  self->qbacklog_len = 0;
}
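/*
 * The rewind above is O(1) no matter how long the backlog is: the splice
 * and the counter updates each touch a fixed number of words.  Below is a
 * sketch of such a splice on a minimal circular doubly-linked list,
 * modeled on the kernel-style lists ivykis provides; these names are
 * stand-ins, not the ivykis API.
 */
struct list_head
{
  struct list_head *next, *prev;
};

static void
list_init(struct list_head *h)
{
  h->next = h->prev = h;
}

/* move every node of `from` to the tail of `to`, leaving `from` empty */
static void
list_splice_tail_init(struct list_head *from, struct list_head *to)
{
  if (from->next == from)
    return;                      /* nothing to move */
  from->next->prev = to->prev;   /* first moved node links back to old tail */
  to->prev->next = from->next;   /* old tail links forward to first moved node */
  from->prev->next = to;         /* last moved node closes the ring at `to` */
  to->prev = from->prev;         /* `to` now ends at the last moved node */
  list_init(from);               /* `from` becomes empty again */
}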
/* move items from the per-thread input queue to the lock-protected "wait" queue */
static void
log_queue_fifo_move_input_unlocked(LogQueueFifo *self, gint thread_id)
{
  gint queue_len;

  /* Since we're in the input thread, queue_len is racy. It can increase
   * due to log_queue_fifo_push_head() and can also decrease as items are
   * removed from the output queue using log_queue_pop_head().
   *
   * The only reason we're using it here is to check for qoverflow
   * overflows; the only side effect of losing the race is that we drop a
   * couple of messages too many, or admit a few more messages into
   * qoverflow than the user permitted. Since the fifo should be sized
   * larger than the potential window sizes when flow control is in use
   * (otherwise we can lose messages anyway), this is not deemed a cost
   * that justifies proper locking here.
   */
  queue_len = log_queue_fifo_get_length(&self->super);
  if (queue_len + self->qoverflow_input[thread_id].len > self->qoverflow_size)
    {
      /* slow path: the input thread's queue would overflow the fifo, drop some messages */
      LogPathOptions path_options = LOG_PATH_OPTIONS_INIT;
      gint i;
      gint n;

      /* NOTE: MAX() clamps the free-space term at zero, so a lost race
       * that leaves queue_len above qoverflow_size cannot make n larger
       * than the number of messages actually on the input queue */
      n = self->qoverflow_input[thread_id].len - MAX(0, (self->qoverflow_size - queue_len));

      for (i = 0; i < n; i++)
        {
          LogMessageQueueNode *node = iv_list_entry(self->qoverflow_input[thread_id].items.next, LogMessageQueueNode, list);
          LogMessage *msg = node->msg;

          iv_list_del(&node->list);
          self->qoverflow_input[thread_id].len--;
          path_options.ack_needed = node->ack_needed;
          stats_counter_inc(self->super.dropped_messages);
          log_msg_free_queue_node(node);
          log_msg_drop(msg, &path_options);
        }
      msg_debug("Destination queue full, dropping messages",
                evt_tag_int("queue_len", queue_len),
                evt_tag_int("log_fifo_size", self->qoverflow_size),
                evt_tag_int("count", n),
                evt_tag_str("persist_name", self->super.persist_name),
                NULL);
    }
  stats_counter_add(self->super.stored_messages, self->qoverflow_input[thread_id].len);
  iv_list_splice_tail_init(&self->qoverflow_input[thread_id].items, &self->qoverflow_wait);
  self->qoverflow_wait_len += self->qoverflow_input[thread_id].len;
  self->qoverflow_input[thread_id].len = 0;
}
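/*
 * A tiny standalone illustration of the clamped drop count above, with
 * made-up numbers (not taken from a real run):
 */
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
  /* hypothetical numbers: the racy queue_len read came back above the
   * configured fifo size because the output side lost the race */
  int qoverflow_size = 100;
  int queue_len = 103;
  int input_len = 10;           /* messages on this thread's input queue */

  /* clamped free space keeps n within the input queue's length */
  int n = input_len - MAX(0, qoverflow_size - queue_len);

  /* without MAX() the free-space term would be -3 and n would be 13,
   * i.e. more nodes than the input list actually holds */
  printf("drop %d of %d input messages\n", n, input_len);  /* drop 10 of 10 */
  return 0;
}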
/*
 * ipath -- find instanced path in cache, or add it if necessary
 */
const struct ipath *
ipath(struct node *np)
{
	struct ipath *ret;
	int count;
	struct node *namep;
	int i;

	if ((ret = lut_lookup(Ipaths, (void *)np,
	    (lut_cmp)ipath_epnamecmp)) != NULL)
		return (ret);	/* already in cache */

	/*
	 * not in cache, make new cache entry.
	 * start by counting the length of the name.
	 */
	count = 0;
	namep = np;
	while (namep != NULL) {
		ASSERTinfo(namep->t == T_NAME, ptree_nodetype2str(namep->t));
		count++;
		namep = namep->u.name.next;
	}
	ASSERT(count > 0);

	/* allocate array for name and last NULL entry */
	ret = MALLOC(sizeof (*ret) * (count + 1));
	ret[count].s = NULL;

	/* fill in ipath entry */
	namep = np;
	i = 0;
	while (namep != NULL) {
		ASSERT(i < count);
		ret[i].s = namep->u.name.s;
		if (namep->u.name.child != NULL &&
		    namep->u.name.child->t == T_NUM)
			ret[i].i = (int)namep->u.name.child->u.ull;
		else
			config_getcompname(namep->u.name.cp, NULL, &ret[i].i);
		i++;
		namep = namep->u.name.next;
	}

	/* add it to the cache */
	Ipaths = lut_add(Ipaths, (void *)ret, (void *)ret, (lut_cmp)ipath_cmp);
	stats_counter_bump(Nipath);
	stats_counter_add(Nbytes, (count + 1) * sizeof (struct ipath));
	return (ret);
}
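/*
 * Note the asymmetry with ipath_for_usednames() above: here the cache is
 * probed with the parse-tree name list itself (via ipath_epnamecmp), so
 * the MALLOC happens only on a miss, whereas ipath_for_usednames() builds
 * the array first and frees it on a hit.  Below is a hypothetical
 * miniature of such a two-representation comparator; the types and names
 * are illustrative, not from this file.
 */
#include <string.h>

struct name_ent {			/* stands in for the T_NAME list */
	const char *s;
	struct name_ent *next;
};

/* compare a cached NULL-terminated array against a name list without
 * materializing the list into an array; returns 0 on a match */
static int
array_vs_list(const char **arr, const struct name_ent *np)
{
	for (; *arr != NULL && np != NULL; arr++, np = np->next)
		if (strcmp(*arr, np->s) != 0)
			return (1);
	return (*arr != NULL || np != NULL);	/* lengths must match too */
}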