/**
 * \brief Unlink an expectation from the per-IPPair list and free it.
 *
 * \param ipp  IP pair whose expectation list is being modified
 * \param pexp previous expectation in the list, or NULL when exp is the head
 * \param exp  expectation to remove and free
 * \param lexp exp's successor, which becomes the new next (or new list head)
 *
 * \return the successor expectation (lexp)
 */
static Expectation *RemoveExpectationAndGetNext(IPPair *ipp, Expectation *pexp,
                                                Expectation *exp, Expectation *lexp)
{
    /* the expectation held a reference on the IP pair; drop it now that
     * the object is going away */
    (void) IPPairDecrUsecnt(ipp);
    SC_ATOMIC_SUB(expectation_count, 1);

    /* unlink: either rewrite the head stored on the IP pair, or the
     * previous node's next pointer */
    if (pexp == NULL) {
        IPPairSetStorageById(ipp, g_expectation_id, lexp);
    } else {
        pexp->next = lexp;
    }

    if (exp->data) {
        ExpectationData *expdata = (ExpectationData *)exp->data;
        /* prefer the registered destructor when one was provided */
        if (expdata->DFree) {
            expdata->DFree(exp->data);
        } else {
            SCFree(exp->data);
        }
    }
    SCFree(exp);
    return lexp;
}
/**
 * \brief Tear down a flow and release its memory.
 *
 * Also subtracts the flow's size from the global memuse counter.
 *
 * \param f flow to clear & destroy
 */
void FlowFree(Flow *f)
{
    FLOW_DESTROY(f);
    SCFree(f);
    (void) SC_ATOMIC_SUB(flow_memuse, sizeof(Flow));
}
/**
 * \brief Drop a reference on a netmap interface config; free it when the
 *        last reference goes away.
 *
 * \param conf pointer to a NetmapIfaceConfig
 */
static void NetmapDerefConfig(void *conf)
{
    NetmapIfaceConfig *nmc = (NetmapIfaceConfig *)conf;
    /* the config is used only once, but refcounting it is cheap */
    if (SC_ATOMIC_SUB(nmc->ref, 1) == 0) {
        SCFree(nmc);
    }
}
/**
 * \brief Drop a reference on an AF_PACKET interface config; free it when
 *        the last reference goes away.
 *
 * \param conf pointer to an AFPIfaceConfig
 */
void AFPDerefConfig(void *conf)
{
    AFPIfaceConfig *pfp = (AFPIfaceConfig *)conf;
    /* the config is used only once, but the cost of refcounting is low */
    if (SC_ATOMIC_SUB(pfp->ref, 1) == 0) {
        SCFree(pfp);
    }
}
/**
 * \brief Tear down a flow and release its memory.
 *
 * Accounts for the flow struct plus its attached storage when updating
 * the global memuse counter.
 *
 * \param f flow to clear & destroy
 */
void FlowFree(Flow *f)
{
    FLOW_DESTROY(f);
    SCFree(f);

    const size_t total = sizeof(Flow) + FlowStorageSize();
    (void) SC_ATOMIC_SUB(flow_memuse, total);
}
/**
 * \brief Free a host and update the host memuse counter.
 *
 * \param h host to free; NULL is a no-op
 */
void HostFree(Host *h)
{
    if (h == NULL)
        return;

    HostClearMemory(h);
    SCMutexDestroy(&h->m);
    SCFree(h);
    (void) SC_ATOMIC_SUB(host_memuse, sizeof(Host));
}
/**
 * \brief Drop a reference on a PF_RING interface config; free it (and its
 *        BPF filter string) when the last reference goes away.
 *
 * \param conf pointer to a PfringIfaceConfig
 */
static void PfringDerefConfig(void *conf)
{
    PfringIfaceConfig *cfg = (PfringIfaceConfig *)conf;
    if (SC_ATOMIC_SUB(cfg->ref, 1) != 0)
        return;

    if (cfg->bpf_filter) {
        SCFree(cfg->bpf_filter);
    }
    SCFree(cfg);
}
/**
 * \brief Free a host, its atomic use counter and mutex, and update the
 *        host memuse counter (struct plus attached storage).
 *
 * \param h host to free; NULL is a no-op
 */
void HostFree(Host *h)
{
    if (h == NULL)
        return;

    HostClearMemory(h);
    SC_ATOMIC_DESTROY(h->use_cnt);
    SCMutexDestroy(&h->m);
    SCFree(h);
    (void) SC_ATOMIC_SUB(host_memuse, (sizeof(Host) + HostStorageSize()));
}
/**
 * \brief Remove tag entries exceeding the max timeout value.
 *
 * \param host host owning the tag list
 * \param tv   the current time
 *
 * \retval 1 no tags or all tags removed -- host is free to go (from tag perspective)
 * \retval 0 still active tags
 */
int TagTimeoutCheck(Host *host, struct timeval *tv)
{
    DetectTagDataEntry *cur = host->tag;
    DetectTagDataEntry *prev = NULL;
    int retval = 1;

    while (cur != NULL) {
        if ((tv->tv_sec - cur->last_ts) <= TAG_MAX_LAST_TIME_SEEN) {
            /* still active: keep it and remember at least one survives */
            prev = cur;
            cur = cur->next;
            retval = 0;
            continue;
        }

        /* timed out: unlink from the list (head or interior) and free */
        DetectTagDataEntry *dead = cur;
        cur = cur->next;
        if (prev != NULL)
            prev->next = cur;
        else
            host->tag = cur;
        SCFree(dead);
        (void) SC_ATOMIC_SUB(num_tags, 1);
    }

    return retval;
}
/**
 * \brief Free a defrag tracker and update the defrag memuse counter.
 *
 * \param dt tracker to free; NULL is a no-op
 */
static void DefragTrackerFree(DefragTracker *dt)
{
    if (dt == NULL)
        return;

    DefragTrackerClearMemory(dt);
    SCMutexDestroy(&dt->lock);
    SCFree(dt);
    (void) SC_ATOMIC_SUB(defrag_memuse, sizeof(DefragTracker));
}
/**
 * \brief Free an IP pair, its atomic use counter and mutex, and update
 *        the ippair memuse counter.
 *
 * \param h IP pair to free; NULL is a no-op
 */
void IPPairFree(IPPair *h)
{
    if (h == NULL)
        return;

    IPPairClearMemory(h);
    SC_ATOMIC_DESTROY(h->use_cnt);
    SCMutexDestroy(&h->m);
    SCFree(h);
    (void) SC_ATOMIC_SUB(ippair_memuse, g_ippair_size);
}
/**
 * \brief Drop a reference on a PF_RING interface config; free it when the
 *        last reference goes away.
 *
 * \param conf pointer to a PfringIfaceConfig
 */
void PfringDerefConfig(void *conf)
{
    PfringIfaceConfig *cfg = (PfringIfaceConfig *)conf;
    if (SC_ATOMIC_SUB(cfg->ref, 1) != 0)
        return;

#ifdef HAVE_PFRING_SET_BPF_FILTER
    if (cfg->bpf_filter) {
        SCFree(cfg->bpf_filter);
    }
#endif
    SCFree(cfg);
}
/** \brief allocate a flow * * We check against the memuse counter. If it passes that check we increment * the counter first, then we try to alloc. * * \retval f the flow or NULL on out of memory */ Flow *FlowAlloc(void) { Flow *f; if (!(FLOW_CHECK_MEMCAP(sizeof(Flow)))) { return NULL; } (void) SC_ATOMIC_ADD(flow_memuse, sizeof(Flow)); f = SCMalloc(sizeof(Flow)); if (f == NULL) { (void) SC_ATOMIC_SUB(flow_memuse, sizeof(Flow)); return NULL; } FLOW_INITIALIZE(f); return f; }
/** \brief allocate a flow * * We check against the memuse counter. If it passes that check we increment * the counter first, then we try to alloc. * * \retval f the flow or NULL on out of memory */ Flow *FlowAlloc(void) { Flow *f; size_t size = sizeof(Flow) + FlowStorageSize(); if (!(FLOW_CHECK_MEMCAP(size))) { return NULL; } (void) SC_ATOMIC_ADD(flow_memuse, size); f = SCMalloc(size); if (unlikely(f == NULL)) { (void)SC_ATOMIC_SUB(flow_memuse, size); return NULL; } memset(f, 0, size); FLOW_INITIALIZE(f); return f; }
/** \brief shutdown the flow engine * \warning Not thread safe */ void HostShutdown(void) { Host *h; uint32_t u; HostPrintStats(); /* free spare queue */ while((h = HostDequeue(&host_spare_q))) { BUG_ON(SC_ATOMIC_GET(h->use_cnt) > 0); HostFree(h); } /* clear and free the hash */ if (host_hash != NULL) { for (u = 0; u < host_config.hash_size; u++) { Host *h = host_hash[u].head; while (h) { Host *n = h->hnext; HostClearMemory(h); HostFree(h); h = n; } HRLOCK_DESTROY(&host_hash[u]); } SCFree(host_hash); host_hash = NULL; } (void) SC_ATOMIC_SUB(host_memuse, host_config.hash_size * sizeof(HostHashRow)); HostQueueDestroy(&host_spare_q); SC_ATOMIC_DESTROY(host_prune_idx); SC_ATOMIC_DESTROY(host_memuse); SC_ATOMIC_DESTROY(host_counter); //SC_ATOMIC_DESTROY(flow_flags); return; }
/** \brief shutdown the flow engine * \warning Not thread safe */ void DefragHashShutdown(void) { DefragTracker *dt; uint32_t u; DefragTrackerPrintStats(); /* free spare queue */ while((dt = DefragTrackerDequeue(&defragtracker_spare_q))) { BUG_ON(SC_ATOMIC_GET(dt->use_cnt) > 0); DefragTrackerFree(dt); } /* clear and free the hash */ if (defragtracker_hash != NULL) { for (u = 0; u < defrag_config.hash_size; u++) { dt = defragtracker_hash[u].head; while (dt) { DefragTracker *n = dt->hnext; DefragTrackerClearMemory(dt); DefragTrackerFree(dt); dt = n; } DRLOCK_DESTROY(&defragtracker_hash[u]); } SCFree(defragtracker_hash); defragtracker_hash = NULL; } (void) SC_ATOMIC_SUB(defrag_memuse, defrag_config.hash_size * sizeof(DefragTrackerHashRow)); DefragTrackerQueueDestroy(&defragtracker_spare_q); SC_ATOMIC_DESTROY(defragtracker_prune_idx); SC_ATOMIC_DESTROY(defrag_memuse); SC_ATOMIC_DESTROY(defragtracker_counter); //SC_ATOMIC_DESTROY(flow_flags); return; }
/** \brief shutdown the flow engine * \warning Not thread safe */ void IPPairShutdown(void) { IPPair *h; uint32_t u; IPPairPrintStats(); /* free spare queue */ while((h = IPPairDequeue(&ippair_spare_q))) { BUG_ON(SC_ATOMIC_GET(h->use_cnt) > 0); IPPairFree(h); } /* clear and free the hash */ if (ippair_hash != NULL) { for (u = 0; u < ippair_config.hash_size; u++) { h = ippair_hash[u].head; while (h) { IPPair *n = h->hnext; IPPairFree(h); h = n; } HRLOCK_DESTROY(&ippair_hash[u]); } SCFreeAligned(ippair_hash); ippair_hash = NULL; } (void) SC_ATOMIC_SUB(ippair_memuse, ippair_config.hash_size * sizeof(IPPairHashRow)); IPPairQueueDestroy(&ippair_spare_q); SC_ATOMIC_DESTROY(ippair_prune_idx); SC_ATOMIC_DESTROY(ippair_memuse); SC_ATOMIC_DESTROY(ippair_counter); SC_ATOMIC_DESTROY(ippair_config.memcap); //SC_ATOMIC_DESTROY(flow_flags); return; }
/**
 * \brief Return a tracker to the spare queue and decrement the count of
 *        trackers in use.
 *
 * \param h tracker to move to the spare queue
 */
void DefragTrackerMoveToSpare(DefragTracker *h)
{
    DefragTrackerEnqueue(&defragtracker_spare_q, h);
    (void) SC_ATOMIC_SUB(defragtracker_counter, 1);
}
/**
 * \brief Return an IP pair to the spare queue and decrement the count of
 *        pairs in use.
 *
 * \param h IP pair to move to the spare queue
 */
void IPPairMoveToSpare(IPPair *h)
{
    IPPairEnqueue(&ippair_spare_q, h);
    (void) SC_ATOMIC_SUB(ippair_counter, 1);
}
/**
 * \brief Subtract \a size bytes from the HTP memory-use counter.
 *
 * \param size number of bytes to subtract
 */
void HTPDecrMemuse(uint64_t size)
{
    (void) SC_ATOMIC_SUB(htp_memuse, size);
}
/**
 * \brief Update the tag entries of a host for the current packet.
 *
 * Refreshes per-tag counters, removes entries whose packet/byte/seconds
 * budget is exhausted, and marks the packet with PKT_HAS_TAG (at most
 * once) when it matches a still-active tag. The packet that triggered
 * the tagging rule itself is skipped: the alert already logs it.
 *
 * \param host host holding the tag list
 * \param p    packet being handled
 */
void TagHandlePacketHost(Host *host, Packet *p)
{
    DetectTagDataEntry *iter = host->tag;
    DetectTagDataEntry *prev = NULL;
    uint8_t flag_added = 0;

    while (iter != NULL) {
        /* update counters */
        iter->last_ts = p->ts.tv_sec;
        switch (iter->metric) {
            case DETECT_TAG_METRIC_PACKET:
                iter->packets++;
                break;
            case DETECT_TAG_METRIC_BYTES:
                iter->bytes += GET_PKT_LEN(p);
                break;
        }

        /* If this packet triggered the rule with tag, we dont need
         * to log it (the alert will log it) */
        if (!(iter->flags & TAG_ENTRY_FLAG_SKIPPED_FIRST)) {
            iter->flags |= TAG_ENTRY_FLAG_SKIPPED_FIRST;
        } else {
            /* decide per metric whether the tag expired or still matches */
            int expired = 0;
            int matches = 0;
            switch (iter->metric) {
                case DETECT_TAG_METRIC_PACKET:
                    if (iter->packets > iter->count)
                        expired = 1;
                    else
                        matches = 1;
                    break;
                case DETECT_TAG_METRIC_BYTES:
                    if (iter->bytes > iter->count)
                        expired = 1;
                    else
                        matches = 1;
                    break;
                case DETECT_TAG_METRIC_SECONDS:
                    /* last_ts handles this metric, but also a generic time
                     * based expiration to prevent dead sessions/hosts */
                    if (iter->last_ts - iter->first_ts > iter->count)
                        expired = 1;
                    else
                        matches = 1;
                    break;
            }

            if (expired) {
                /* unlink the expired entry (head or interior), free it,
                 * and continue with its successor */
                DetectTagDataEntry *dead = iter;
                iter = iter->next;
                if (prev != NULL)
                    prev->next = iter;
                else
                    host->tag = iter;
                SCFree(dead);
                (void) SC_ATOMIC_SUB(num_tags, 1);
                continue;
            }
            if (matches && flag_added == 0) {
                /* It's matching the tag. Add it to be logged and
                 * update "flag_added" to add the packet once. */
                p->flags |= PKT_HAS_TAG;
                flag_added++;
            }
        }
        prev = iter;
        iter = iter->next;
    }
}
/**
 * \brief Write extracted file data to the filestore on disk.
 *
 * On OPEN, creates the on-disk file (and its .meta companion) and, when
 * under the open-files limit, caches the descriptor in ff->fd; otherwise
 * a one-shot descriptor is used per call. On data, appends to the file.
 * On CLOSE, closes the cached descriptor and finalizes the files.
 *
 * \param tv          thread vars (used for per-thread stats)
 * \param thread_data LogFilestoreLogThread for this thread
 * \param p           packet the file data belongs to
 * \param ff          file being stored
 * \param data        file data chunk (may be NULL on pure close)
 * \param data_len    length of data
 * \param flags       OUTPUT_FILEDATA_FLAG_OPEN / _CLOSE markers
 * \param dir         direction (currently unused here)
 *
 * \retval 0 on success (or skipped packet), -1 on error
 */
static int LogFilestoreLogger(ThreadVars *tv, void *thread_data, const Packet *p, File *ff,
        const uint8_t *data, uint32_t data_len, uint8_t flags, uint8_t dir)
{
    SCEnter();
    LogFilestoreLogThread *aft = (LogFilestoreLogThread *)thread_data;
    char filename[PATH_MAX] = "";
    int file_fd = -1;
    int ipver = -1;

    /* no flow, no htp state */
    if (p->flow == NULL) {
        SCReturnInt(TM_ECODE_OK);
    }

    if (PKT_IS_IPV4(p)) {
        ipver = AF_INET;
    } else if (PKT_IS_IPV6(p)) {
        ipver = AF_INET6;
    } else {
        return 0;
    }

    SCLogDebug("ff %p, data %p, data_len %u", ff, data, data_len);

    char pid_expression[PATH_MAX] = "";
    if (FileIncludePid())
        snprintf(pid_expression, sizeof(pid_expression), ".%d", getpid());

    /* BUG FIX: snprintf reports truncation with any return value >= the
     * buffer size and errors with a negative value; the previous
     * "== sizeof(buf)" comparison missed both longer results and errors. */
    char base_filename[PATH_MAX] = "";
    int rc = snprintf(base_filename, sizeof(base_filename), "%s/file%s.%u",
            g_logfile_base_dir, pid_expression, ff->file_store_id);
    if (rc < 0 || rc >= (int)sizeof(base_filename))
        return -1;
    rc = snprintf(filename, sizeof(filename), "%s%s", base_filename, g_working_file_suffix);
    if (rc < 0 || rc >= (int)sizeof(filename))
        return -1;

    if (flags & OUTPUT_FILEDATA_FLAG_OPEN) {
        aft->file_cnt++;

        /* create a .meta file that contains time, src/dst/sp/dp/proto */
        LogFilestoreLogCreateMetaFile(p, ff, base_filename, ipver);

        if (SC_ATOMIC_GET(filestore_open_file_cnt) < FileGetMaxOpenFiles()) {
            /* under the limit: keep the descriptor cached on the file */
            SC_ATOMIC_ADD(filestore_open_file_cnt, 1);
            ff->fd = open(filename, O_CREAT | O_TRUNC | O_NOFOLLOW | O_WRONLY, 0644);
            if (ff->fd == -1) {
                SCLogDebug("failed to create file");
                return -1;
            }
            file_fd = ff->fd;
        } else {
            /* over the limit: use a one-shot descriptor for this call */
            file_fd = open(filename, O_CREAT | O_TRUNC | O_NOFOLLOW | O_WRONLY, 0644);
            if (file_fd == -1) {
                SCLogDebug("failed to create file");
                return -1;
            }
            if (FileGetMaxOpenFiles() > 0) {
                StatsIncr(tv, aft->counter_max_hits);
            }
            ff->fd = -1;
        }
    /* we can get called with a NULL ffd when we need to close */
    } else if (data != NULL) {
        if (ff->fd == -1) {
            file_fd = open(filename, O_APPEND | O_NOFOLLOW | O_WRONLY);
            if (file_fd == -1) {
                SCLogDebug("failed to open file %s: %s", filename, strerror(errno));
                return -1;
            }
        } else {
            file_fd = ff->fd;
        }
    }

    if (file_fd != -1) {
        ssize_t r = write(file_fd, (const void *)data, (size_t)data_len);
        if (r == -1) {
            SCLogDebug("write failed: %s", strerror(errno));
            if (ff->fd != -1) {
                /* a cached descriptor is being abandoned: release its slot */
                SC_ATOMIC_SUB(filestore_open_file_cnt, 1);
            }
            ff->fd = -1;
        }
        if (ff->fd == -1) {
            /* one-shot descriptor (or a failed write): close it now */
            close(file_fd);
        }
    }

    if (flags & OUTPUT_FILEDATA_FLAG_CLOSE) {
        if (ff->fd != -1) {
            close(ff->fd);
            ff->fd = -1;
            SC_ATOMIC_SUB(filestore_open_file_cnt, 1);
        }
        LogFilestoreFinalizeFiles(ff);
    }

    return 0;
}
/**
 * \brief Return a host to the spare queue and decrement the count of
 *        hosts in use.
 *
 * \param h host to move to the spare queue
 */
void HostMoveToSpare(Host *h)
{
    HostEnqueue(&host_spare_q, h);
    (void) SC_ATOMIC_SUB(host_counter, 1);
}