/*
 * Log a flagged sample (hash, size, hit count, names, section hashes)
 * via cli_warnmsg. Purely diagnostic; tolerates NULL input.
 */
void print_sample(cli_flagged_sample_t *sample)
{
    char *hash, *names;
    size_t i;

    if (!(sample))
        return;

    hash = get_hash(sample->md5);
    if (!(hash))
        return;

    cli_warnmsg("Sample[%s]:\n", hash);
    cli_warnmsg("    * Size: %zu\n", sample->size);
    cli_warnmsg("    * Hits: %u\n", sample->hits);
    free(hash);

    names = get_sample_names(sample->virus_name);
    if ((names))
        cli_warnmsg("    * Names: %s\n", names);

    if (sample->sections && sample->sections->nsections) {
        for (i = 0; i < sample->sections->nsections; i++) {
            hash = get_hash(sample->sections->sections[i].md5);
            if ((hash)) {
                cli_warnmsg("    * Section[%zu] (%zu): %s\n", i, sample->sections->sections[i].len, hash);
                free(hash);
            }
        }
    }

    /* free(NULL) is a no-op, so no guard is needed */
    free(names);
}
/*
 * WIN32: map [offset, offset+len) of descriptor fd read-only via
 * CreateFileMapping/MapViewOfFile and wrap it in an fmap_t.
 * Sets *empty=1 (and returns NULL) for a zero-length mapping; returns
 * NULL on any other failure.  offset must be page-aligned.
 */
fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty)
{ /* WIN32 */
    int pgsz = cli_getpagesize();
    STATBUF st;
    fmap_t *m;
    const void *data;
    HANDLE fh;
    HANDLE mh;

    *empty = 0;
    if(FSTAT(fd, &st)) {
        cli_warnmsg("fmap: fstat failed\n");
        return NULL;
    }
    if(offset < 0 || offset != fmap_align_to(offset, pgsz)) {
        cli_warnmsg("fmap: attempted mapping with unaligned offset\n");
        return NULL;
    }
    if(!len) len = st.st_size - offset; /* bound checked later */
    if(!len) {
        cli_dbgmsg("fmap: attempted void mapping\n");
        *empty = 1;
        return NULL;
    }
    if(!CLI_ISCONTAINED(0, st.st_size, offset, len)) {
        cli_warnmsg("fmap: attempted oof mapping\n");
        return NULL;
    }

    if((fh = (HANDLE)_get_osfhandle(fd)) == INVALID_HANDLE_VALUE) {
        cli_errmsg("fmap: cannot get a valid handle for descriptor %d\n", fd);
        return NULL;
    }
    /* (len>>31)>>1 extracts the high DWORD without UB on 32-bit size_t */
    if(!(mh = CreateFileMapping(fh, NULL, PAGE_READONLY, (DWORD)((len>>31)>>1), (DWORD)len, NULL))) {
        cli_errmsg("fmap: cannot create a map of descriptor %d\n", fd);
        CloseHandle(fh);
        return NULL;
    }
    if(!(data = MapViewOfFile(mh, FILE_MAP_READ, (DWORD)((offset>>31)>>1), (DWORD)(offset), len))) {
        cli_errmsg("fmap: cannot map file descriptor %d\n", fd);
        CloseHandle(mh);
        CloseHandle(fh);
        return NULL;
    }
    /* fixed: message typo ("canot") and a surplus printf argument */
    if(!(m = cl_fmap_open_memory(data, len))) {
        cli_errmsg("fmap: cannot allocate fmap_t\n");
        CloseHandle(mh);
        CloseHandle(fh);
        return NULL;
    }
    m->handle = (void*)(ssize_t)fd;
    m->handle_is_fd = 1;
    m->fh = fh;
    m->mh = mh;
    m->unmap = unmap_win32;
    return m;
}
/*
 * POSIX pread-backed variant: validate the requested window against the
 * file size, then build a handle-based fmap over descriptor fd.
 * *empty is set when the effective length is zero; NULL is returned on
 * any failure.
 */
fmap_t *fmap_check_empty(int fd, off_t offset, size_t len, int *empty)
{
    STATBUF sb;
    fmap_t *map;

    *empty = 0;
    if (FSTAT(fd, &sb)) {
        cli_warnmsg("fmap: fstat failed\n");
        return NULL;
    }

    /* zero length means "to end of file"; bounds are verified below */
    if (!len)
        len = sb.st_size - offset;
    if (!len) {
        cli_dbgmsg("fmap: attempted void mapping\n");
        *empty = 1;
        return NULL;
    }
    if (!CLI_ISCONTAINED(0, sb.st_size, offset, len)) {
        cli_warnmsg("fmap: attempted oof mapping\n");
        return NULL;
    }

    map = cl_fmap_open_handle((void *)(ssize_t)fd, offset, len, pread_cb, 1);
    if (!map)
        return NULL;

    map->mtime = sb.st_mtime;
    map->handle_is_fd = 1;
    return map;
}
/*
 * Pop the most recent allocation off the chunked stack allocator.
 * Frees must happen in strict LIFO order: `data` has to be the block
 * returned by the last cli_stack_alloc.  The size of the allocation
 * being freed is stack->last_size (in align_t units); the size of the
 * allocation *before* it was stashed as a uint16_t trailer in the last
 * two bytes of the current allocation (see cli_stack_alloc).
 */
static always_inline void cli_stack_free(struct stack *stack, void *data)
{
    uint16_t last_size;
    struct stack_chunk *chunk = stack->chunk;
    if (!chunk) {
        cli_warnmsg("cli_stack_free: stack empty!\n");
        return;
    }
    /* LIFO check: data + its recorded size must land exactly at the
     * chunk's current high-water mark */
    if ((chunk->u.data + chunk->used) != ((char*)data + stack->last_size*sizeof(align_t))) {
        cli_warnmsg("cli_stack_free: wrong free order: %p, expected %p\n",
                    data, chunk->u.data + chunk->used - stack->last_size*sizeof(align_t));
        return;
    }
    /* recover the previous allocation's size from the 2-byte trailer */
    last_size = *(uint16_t*)&chunk->u.data[chunk->used-2];
    if (chunk->used < stack->last_size*sizeof(align_t)) {
        cli_warnmsg("cli_stack_free: last_size is corrupt!\n");
        return;
    }
    chunk->used -= stack->last_size*sizeof(align_t);
    stack->last_size = last_size;
    /* chunk fully drained: unlink and release it */
    if (!chunk->used) {
        stack->chunk = chunk->prev;
        free(chunk);
    }
}
/*
 * Sanity-check the protective MBR that precedes a GPT.  Emits warnings
 * for spec violations (wrong first LBA, non-empty secondary entries,
 * hybrid or non-protective layouts) but never flags them as errors.
 * Returns CL_CLEAN on success, CL_EFORMAT if the MBR cannot be read.
 */
static int gpt_check_mbr(cli_ctx *ctx, size_t sectorsize)
{
    struct mbr_boot_record mbr;
    off_t read_pos;
    unsigned idx;
    int status = CL_CLEAN;

    /* the boot record occupies the tail of sector MBR_SECTOR */
    read_pos = (MBR_SECTOR * sectorsize) + (sectorsize - sizeof(struct mbr_boot_record));
    if (fmap_readn(*ctx->fmap, &mbr, read_pos, sizeof(mbr)) != sizeof(mbr)) {
        cli_dbgmsg("cli_scangpt: Invalid primary MBR header\n");
        return CL_EFORMAT;
    }
    mbr_convert_to_host(&mbr);

    if (mbr.entries[0].type == MBR_PROTECTIVE) {
        /* per GPT spec the EFI partition must start at the primary header LBA */
        if (mbr.entries[0].firstLBA != GPT_PRIMARY_HDR_LBA) {
            cli_warnmsg("cli_scangpt: protective MBR first LBA is incorrect %u\n",
                        mbr.entries[0].firstLBA);
        }
        /* every remaining entry should be empty */
        for (idx = 1; idx < MBR_MAX_PARTITION_ENTRIES; ++idx) {
            if (mbr.entries[idx].type != MBR_EMPTY) {
                cli_warnmsg("cli_scangpt: protective MBR has non-empty partition\n");
                break;
            }
        }
    } else if (mbr.entries[0].type == MBR_HYBRID) {
        cli_warnmsg("cli_scangpt: detected a hybrid MBR\n");
    } else {
        cli_warnmsg("cli_scangpt: detected a non-protective MBR\n");
    }

    /* scan the bootloader segment - pushed to scanning mbr */
    /* check if MBR size matches GPT size */
    /* check if the MBR and GPT partitions align - heuristic */
    /* scan the MBR partitions - additional scans */

    return status;
}
/*
 * Dump per-map and aggregate usage statistics for a memory pool.
 * Diagnostic only; walks the MPMAP list starting at the embedded map.
 */
void mpool_stats(struct MP *mp)
{
    unsigned int i = 0, ta = 0, tu = 0;
    struct MPMAP *mpm = &mp->mpm;

    /* fixed: a lone '%' in a printf-style format string is an invalid
     * conversion specification (undefined behavior); use "%%" */
    cli_warnmsg("MEMORY POOL STATISTICS\n map \tsize\tused\t%%\n");
    while (mpm) {
        cli_warnmsg("- %u\t%u\t%u\t%f%%\n", i, mpm->size, mpm->usize,
                    (float)mpm->usize / (float)mpm->size * 100);
        ta += mpm->size;
        tu += mpm->usize;
        i++;
        mpm = mpm->next;
    }
    cli_warnmsg("MEMORY POOL SUMMARY\nMaps: %u\nTotal: %u\nUsed: %u (%f%%)\n",
                i, ta, tu, (float)tu / (float)ta * 100);
}
/*
 * Record an error string in the event context's error list and echo it
 * as a warning.  Silently ignores a NULL context.
 */
void cli_event_error_str(cli_events_t *ctx, const char *str)
{
    if (ctx == NULL)
        return;

    cli_warnmsg("events: %s\n", str);
    event_string(ctx, &ctx->errors, str);
}
/*
 * Look up a certificate in the manager matching (issuer, serial) and the
 * requested usage (code/time signing), and verify the PKCS#7 RSA
 * signature against refhash.  Returns the matching cli_crt or NULL.
 */
cli_crt *crtmgr_verify_pkcs7(crtmgr *m, const uint8_t *issuer, const uint8_t *serial,
                             const void *signature, unsigned int signature_len,
                             cli_crt_hashtype hashtype, const uint8_t *refhash,
                             cli_vrfy_type vrfytype)
{
    cli_crt *i;
    mp_int sig;
    int ret;

    /* only RSA-1024 through RSA-4096 signature sizes are supported */
    if(signature_len < 1024/8 || signature_len > 4096/8+1) {
        cli_dbgmsg("crtmgr_verify_pkcs7: unsupported sig len: %u\n", signature_len);
        return NULL;
    }
    if((ret = mp_init(&sig))) {
        cli_errmsg("crtmgr_verify_pkcs7: mp_init failed with %d\n", ret);
        return NULL;
    }

    if((ret=mp_read_unsigned_bin(&sig, signature, signature_len))) {
        cli_warnmsg("crtmgr_verify_pkcs7: mp_read_unsigned_bin failed with %d\n", ret);
        mp_clear(&sig); /* fixed: was leaked on this error path */
        return NULL;
    }

    for(i = m->crts; i; i = i->next) {
        /* skip certs not authorized for the requested verification type */
        if(vrfytype == VRFY_CODE && !i->codeSign)
            continue;
        if(vrfytype == VRFY_TIME && !i->timeSign)
            continue;
        if(!memcmp(i->issuer, issuer, sizeof(i->issuer)) &&
           !memcmp(i->serial, serial, sizeof(i->serial)) &&
           !crtmgr_rsa_verify(i, &sig, hashtype, refhash)) {
            break;
        }
    }

    mp_clear(&sig);
    return i;
}
/* Stripe handling: stored block (type 0x1) */
/*
 * Copy one uncompressed DMG stripe verbatim from the fmap to fd.
 * Returns CL_CLEAN on success (including empty stripes), CL_EMAP if the
 * source window cannot be mapped, CL_EWRITE on a write error.
 */
static int dmg_stripe_store(cli_ctx *ctx, int fd, uint32_t index, struct dmg_mish_with_stripes *mish_set)
{
    const void *obuf;
    size_t off = mish_set->stripes[index].dataOffset;
    size_t len = mish_set->stripes[index].dataLength;
    ssize_t written;

    cli_dbgmsg("dmg_stripe_store: stripe " STDu32 "\n", index);
    if (len == 0)
        return CL_CLEAN;

    obuf = fmap_need_off_once(*ctx->fmap, off, len);
    if (!obuf) {
        cli_warnmsg("dmg_stripe_store: fmap need failed on stripe " STDu32 "\n", index);
        return CL_EMAP;
    }
    written = cli_writen(fd, obuf, len);
    /* merged: error and short-write branches emitted the same message;
     * the cast makes the signed/unsigned comparison explicit */
    if (written < 0 || (size_t)written != len) {
        cli_errmsg("dmg_stripe_store: error writing bytes to file (out of disk space?)\n");
        return CL_EWRITE;
    }
    return CL_CLEAN;
}
/* * Performs a full scan on the fileblob, returning ClamAV status: * CL_BREAK means clean * CL_CLEAN means unknown * CL_VIRUS means infected */ int fileblobScan(const fileblob *fb) { int rc; STATBUF sb; if(fb->isInfected) return CL_VIRUS; if(fb->fp == NULL || fb->fullname == NULL) { /* shouldn't happen, scan called before fileblobSetFilename */ cli_warnmsg("fileblobScan, fullname == NULL\n"); return CL_ENULLARG; /* there is no CL_UNKNOWN */ } if(fb->ctx == NULL) { /* fileblobSetCTX hasn't been called */ cli_dbgmsg("fileblobScan, ctx == NULL\n"); return CL_CLEAN; /* there is no CL_UNKNOWN */ } fflush(fb->fp); lseek(fb->fd, 0, SEEK_SET); FSTAT(fb->fd, &sb); if(cli_matchmeta(fb->ctx, fb->b.name, sb.st_size, sb.st_size, 0, 0, 0, NULL) == CL_VIRUS) return CL_VIRUS; rc = cli_magic_scandesc(fb->fd, fb->ctx); if(rc == CL_VIRUS) { cli_dbgmsg("%s is infected\n", fb->fullname); return CL_VIRUS; } cli_dbgmsg("%s is clean\n", fb->fullname); return CL_BREAK; }
/*
 * Parse an OOXML properties document from fd through libxml2's text
 * reader after applying engine limit checks.  Returns CL_SUCCESS when
 * the reader cannot even be created (internal libxml2 failure), the
 * limit-check status on limit failure, otherwise the parser's status.
 */
static int ooxml_parse_document(int fd, cli_ctx *ctx)
{
    xmlTextReaderPtr reader = NULL;
    int ret;

    cli_dbgmsg("in ooxml_parse_document\n");

    /* perform engine limit checks in temporary tracking session */
    ret = ooxml_updatelimits(fd, ctx);
    if (ret != CL_CLEAN)
        return ret;

    reader = xmlReaderForFd(fd, "properties.xml", NULL, CLAMAV_MIN_XMLREADER_FLAGS);
    if (reader == NULL) {
        cli_dbgmsg("ooxml_parse_document: xmlReaderForFd error\n");
        return CL_SUCCESS; // internal error from libxml2
    }

    ret = cli_msxml_parse_document(ctx, reader, ooxml_keys, num_ooxml_keys, 1);
    if (ret != CL_SUCCESS && ret != CL_ETIMEOUT && ret != CL_BREAK)
        cli_warnmsg("ooxml_parse_document: encountered issue in parsing properties document\n");

    xmlTextReaderClose(reader);
    xmlFreeTextReader(reader);
    return ret;
}
/*
 * Reserve `len` additional bytes of capacity in the blob.
 * Return clamav return code: CL_SUCCESS, or CL_EMEM if the
 * (re)allocation failed.  Growing a closed blob re-opens it.
 */
int blobGrow(blob *b, size_t len)
{
    unsigned char *newdata;

    assert(b != NULL);
    assert(b->magic == BLOBCLASS);

    if (len == 0)
        return CL_SUCCESS;

    if (b->isClosed) {
        /*
         * Should be cli_dbgmsg, but I want to see them for now,
         * and cli_dbgmsg doesn't support debug levels
         */
        cli_warnmsg("Growing closed blob\n");
        b->isClosed = 0;
    }

    if (b->data == NULL) {
        assert(b->len == 0);
        assert(b->size == 0);
        b->data = cli_malloc(len);
        if (b->data)
            b->size = (off_t)len;
    } else {
        /* realloc via a temporary so the old buffer survives failure */
        newdata = cli_realloc(b->data, b->size + len);
        if (newdata) {
            b->size += (off_t)len;
            b->data = newdata;
        }
    }

    return (b->data == NULL) ? CL_EMEM : CL_SUCCESS;
}
int hashtab_insert(struct hashtable *s,const unsigned char* key,const size_t len,const element_data data) { struct element* element; struct element* deleted_element = NULL; size_t tries = 1; size_t idx; if(!s) return CL_ENULLARG; do { PROFILE_CALC_HASH(s); idx = hash(key, len, s->capacity); element = &s->htable[idx]; do { if(!element->key) { unsigned char* thekey; /* element not found, place is empty, insert*/ if(deleted_element) { /* reuse deleted elements*/ element = deleted_element; PROFILE_DELETED_REUSE(s, tries); } else { PROFILE_INSERT(s, tries); } thekey = cli_malloc(len+1); if(!thekey) return CL_EMEM; strncpy((char*)thekey,(const char*)key,len+1); element->key = thekey; element->data = data; s->used++; if(s->used > s->maxfill) { cli_dbgmsg("hashtab.c:Growing hashtable %p, because it has exceeded maxfill, old size:%ld\n",(void*)s,s->capacity); hashtab_grow(s); } return 0; } else if(element->key == DELETED_KEY) { deleted_element = element; } else if(strncmp((const char*)key,(const char*)element->key,len)==0) { PROFILE_DATA_UPDATE(s, tries); element->data = data;/* key found, update */ return 0; } else { idx = (idx + tries++) % s->capacity; element = &s->htable[idx]; } } while (tries <= s->capacity); /* no free place found*/ PROFILE_HASH_EXHAUSTED(s); cli_dbgmsg("hashtab.c: Growing hashtable %p, because its full, old size:%ld.\n",(void*)s,s->capacity); } while( hashtab_grow(s) >= 0 ); cli_warnmsg("hashtab.c: Unable to grow hashtable\n"); return CL_EMEM; }
/*
 * Transfer the contents of the text into a blob
 * The caller must free the returned blob if b is NULL
 *
 * Returns b (or a newly created blob when b is NULL), or NULL on
 * allocation failure.  When destroy is nonzero the text chain after the
 * head is released as it is consumed.
 */
blob *
textToBlob(text *t, blob *b, int destroy)
{
    size_t s;
    blob *bin;

    if(t == NULL)
        return NULL;

    s = 0;
    (void)textIterate(t, getLength, &s, 0);

    if(s == 0)
        return b;

    /*
     * copy b. If b is NULL and an error occurs we know we need to free
     *	before returning
     */
    bin = b;
    if(b == NULL) {
        b = blobCreate();
        if(b == NULL)
            return NULL;
    }

    if(blobGrow(b, s) != CL_SUCCESS) {
        cli_warnmsg("Couldn't grow the blob: we may be low on memory\n");
        /* removed a dead "#if 0" recovery experiment: only free the blob
         * if we created it ourselves, then report failure */
        if(bin == NULL)
            blobDestroy(b);
        return NULL;
    }

    (void)textIterate(t, addToBlob, b, destroy);

    if(destroy && t->t_next) {
        textDestroy(t->t_next);
        t->t_next = NULL;
    }

    blobClose(b);

    return b;
}
/*
 * Win32 readdir emulation over FindFirstFileW/FindNextFileW.  Returns a
 * pointer to the DIR's internal dirent (valid until the next call), or
 * NULL on error/end of directory with errno set.
 */
struct dirent *readdir_w(DIR *d)
{
    WIN32_FIND_DATAW wfdw;
    wchar_t *name_w = NULL;
    char *name_a = NULL;

    DIRENT_VALIDATE(d, EFAULT, NULL);

    if (d->nfiles == 0) {
        if (!(name_w = cw_mb2wc(d->pattern))) {
            cli_warnmsg("[dirent] mb2wc(\"%s\") failed %d\n", d->pattern, GetLastError());
            errno = EINVAL;
            return NULL;
        }
        if ((d->hdir = FindFirstFileW(name_w, &wfdw)) == INVALID_HANDLE_VALUE) {
            free(name_w);
            dirent_perror(d->pattern);
            /* fixed: previously fell through here, double-freeing name_w
             * below and reading the uninitialized wfdw */
            return NULL;
        }
        d->init = 1;
    } else if (!FindNextFileW(d->hdir, &wfdw)) {
        dirent_perror("FindNextFileW()");
        /* fixed: previously continued with stale find data; treat as
         * end-of-directory/error like readdir(3) does */
        return NULL;
    }
    free(name_w);

    /* avoid messing with multi byte and wide chars if . or .. */
    if (!wcscmp(wfdw.cFileName, L"."))
        name_a = _strdup(".");
    else if (!wcscmp(wfdw.cFileName, L".."))
        name_a = _strdup("..");
    else if (!(name_a = cw_wc2mb(wfdw.cFileName, WC_NO_BEST_FIT_CHARS))) {
        if (!wfdw.cAlternateFileName[0]) {
            fwprintf(stderr, L"[dirent] alternative name path not found (ntfs 8dot3 disabled?)\n", GetLastError());
            errno = EINVAL;
            return NULL;
        }
        if (!(name_a = cw_wc2mb(wfdw.cAlternateFileName, 0))) {
            fwprintf(stderr, L"[dirent] alternative name path conversion failed (%d)\n", GetLastError());
            errno = EINVAL;
            return NULL;
        }
    }

    DIRENT_VALIDATE(name_a, EFAULT, NULL);

    d->nfiles++;
    if (d->dent.d_name)
        free(d->dent.d_name);
    d->dent.d_name = name_a;
    return (&d->dent);
}
/*
 * Release an anonymous-mmap-backed fmap: the header and all pages were
 * allocated in one mmap region starting at m itself.
 */
static void unmap_mmap(fmap_t *m)
{
#ifdef ANONYMOUS_MAP
    size_t len = m->pages * m->pgsz + m->hdrsz;
    fmap_lock;
    if (munmap((void *)m, len) == -1) /* munmap() failed */
        /* fixed: %zu for size_t (was %d — format/argument mismatch) */
        cli_warnmsg("funmap: unable to unmap memory segment at address: %p with length: %zu\n", (void *)m, len);
    fmap_unlock;
#endif
}
void clamav_stats_decrement_count(const char *virname, const unsigned char *md5, size_t size, void *cbdata) { cli_intel_t *intel; cli_flagged_sample_t *sample; int err; intel = (cli_intel_t *)cbdata; if (!(intel)) return; #ifdef CL_THREAD_SAFE err = pthread_mutex_lock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_decrement_count: locking mutex failed (err: %d): %s\n", err, strerror(err)); return; } #endif sample = find_sample(intel, virname, md5, size, NULL); if (!(sample)) goto clamav_stats_decrement_end; if (sample->hits == 1) { if ((intel->engine->cb_stats_remove_sample)) intel->engine->cb_stats_remove_sample(virname, md5, size, intel); else clamav_stats_remove_sample(virname, md5, size, intel); goto clamav_stats_decrement_end; } sample->hits--; clamav_stats_decrement_end: #ifdef CL_THREAD_SAFE err = pthread_mutex_unlock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_decrement_count: unlocking mutex failed (err: %d): %s\n", err, strerror(err)); } #endif return; }
/*
 * Release a previously fmap_need'ed window [at, at+len): unlock every
 * page the window touches.  No-op for dumb maps; warns on a zero-length
 * or out-of-bounds request.
 */
void fmap_unneed_off(fmap_t *m, size_t at, size_t len)
{
    unsigned int page, first_page, last_page;

    if (m->dumb)
        return;
    if (!len) {
        cli_warnmsg("fmap_unneed: attempted void unneed\n");
        return;
    }
    if (!CLI_ISCONTAINED(0, m->len, at, len)) {
        cli_warnmsg("fmap: attempted oof unneed\n");
        return;
    }

    first_page = fmap_which_page(m, at);
    last_page = fmap_which_page(m, at + len - 1);
    for (page = first_page; page <= last_page; page++)
        fmap_unneed_page(m, page);
}
/*
 * Allocate `bytes` from the chunked stack allocator.  The request is
 * rounded up to align_t granularity with room for a 2-byte trailer
 * storing the previous allocation's size (consumed by cli_stack_free).
 * Returns NULL when the request exceeds STACK_CHUNKSIZE or a new chunk
 * cannot be allocated.
 */
static always_inline void* cli_stack_alloc(struct stack *stack, unsigned bytes)
{
    struct stack_chunk *chunk = stack->chunk;
    uint16_t last_size_off;

    /* last_size is stored after data */
    /* align bytes to pointer size */
    bytes = (bytes + sizeof(uint16_t) + sizeof(align_t)) & ~(sizeof(align_t)-1);
    last_size_off = bytes - 2;

    if (chunk && (chunk->used + bytes <= STACK_CHUNKSIZE)) {
        /* there is still room in this chunk */
        void *ret;
        *(uint16_t*)&chunk->u.data[chunk->used + last_size_off] = stack->last_size;
        stack->last_size = bytes/sizeof(align_t);
        ret = chunk->u.data + chunk->used;
        chunk->used += bytes;
        return ret;
    }

    if(bytes >= STACK_CHUNKSIZE) {
        cli_warnmsg("cli_stack_alloc: Attempt to allocate more than STACK_CHUNKSIZE bytes: %u!\n", bytes);
        return NULL;
    }
    /* not enough room here, allocate new chunk */
    chunk = cli_malloc(sizeof(*stack->chunk));
    if (!chunk) {
        /* fixed: %zu for the size_t sizeof result (was %u) */
        cli_warnmsg("cli_stack_alloc: Unable to allocate memory for stack-chunk: bytes: %zu!\n", sizeof(*stack->chunk));
        return NULL;
    }
    *(uint16_t*)&chunk->u.data[last_size_off] = stack->last_size;
    stack->last_size = bytes/sizeof(align_t);
    chunk->used = bytes;
    chunk->prev = stack->chunk;
    stack->chunk = chunk;
    return chunk->u.data;
}
/*
 * Print a table of per-PCRE performance statistics (run count, match
 * count, total and average microseconds) collected in the p_sigevents
 * event context, sorted via sigelem_comp.  Diagnostic only.
 */
void cli_pcre_perf_print()
{
    struct sigperf_elem stats[MAX_TRACKED_PCRE], *elem = stats;
    int i, elems = 0, max_name_len = 0, name_len;

    if (!p_sigid || !p_sigevents) {
        cli_warnmsg("cli_pcre_perf_print: statistics requested but no PCREs were loaded!\n");
        return;
    }

    memset(stats, 0, sizeof(stats));

    /* gather: each tracked PCRE owns PCRE_EVENTS_PER_SIG consecutive
     * event slots; slot 0 is time/runs, slot 1 is matches */
    for (i=0;i<MAX_TRACKED_PCRE;i++) {
        union ev_val val;
        uint32_t count;
        const char * name = cli_event_get_name(p_sigevents, i*PCRE_EVENTS_PER_SIG);
        cli_event_get(p_sigevents, i*PCRE_EVENTS_PER_SIG, &val, &count);
        if (!count) {
            if (name)
                cli_dbgmsg("No event triggered for %s\n", name);
            continue;
        }
        if (name)
            name_len = strlen(name);
        else
            name_len = 0;
        /* track the widest name for column alignment below */
        if (name_len > max_name_len)
            max_name_len = name_len;
        elem->name = name?name:"\"noname\"";
        elem->usecs = val.v_int;
        elem->run_count = count;
        cli_event_get(p_sigevents, i*PCRE_EVENTS_PER_SIG+1, &val, &count);
        elem->match_count = count;
        elem++;
        elems++;
    }
    if (max_name_len < strlen("PCRE Expression"))
        max_name_len = strlen("PCRE Expression");

    cli_qsort(stats, elems, sizeof(struct sigperf_elem), sigelem_comp);
    elem = stats;
    /* name runs matches microsecs avg */
    cli_infomsg (NULL, "%-*s %*s %*s %*s %*s\n", max_name_len, "PCRE Expression",
                 8, "#runs", 8, "#matches", 12, "usecs total", 9, "usecs avg");
    cli_infomsg (NULL, "%-*s %*s %*s %*s %*s\n", max_name_len, "===============",
                 8, "=====", 8, "========", 12, "===========", 9, "=========");
    /* the stats array was zeroed, so a zero run_count marks the end */
    while (elem->run_count) {
        cli_infomsg (NULL, "%-*s %*lu %*lu %*llu %*.2f\n", max_name_len, elem->name,
                     8, elem->run_count, 8, elem->match_count,
                     12, (long long unsigned)elem->usecs, 9, (double)elem->usecs/elem->run_count);
        elem++;
    }
}
/*
 * Handle-backed counterpart of fmap_unneed_off: unlock all pages that
 * back the window [at, at+len), translating through the nested offset.
 * No-op when the map does not age pages.
 */
static void handle_unneed_off(fmap_t *m, size_t at, size_t len)
{
    unsigned int page, first_page, last_page;

    if (!m->aging)
        return;
    if (!len) {
        cli_warnmsg("fmap_unneed: attempted void unneed\n");
        return;
    }

    /* translate into the real (outermost) map coordinates */
    at += m->nested_offset;
    if (!CLI_ISCONTAINED(0, m->real_len, at, len)) {
        cli_warnmsg("fmap: attempted oof unneed\n");
        return;
    }

    first_page = fmap_which_page(m, at);
    last_page = fmap_which_page(m, at + len - 1);
    for (page = first_page; page <= last_page; page++)
        fmap_unneed_page(m, page);
}
/* TODO: field in ctx, id of last bytecode that called magicscandesc, reset * after hooks/other bytecodes are run. TODO: need a more generic solution * to avoid uselessly recursing on bytecode-unpacked files, but also a way to * override the limit if we need it in a special situation */ int32_t cli_bcapi_write(struct cli_bc_ctx *ctx, uint8_t*data, int32_t len) { char err[128]; int32_t res; cli_ctx *cctx = (cli_ctx*)ctx->ctx; if (len < 0) { cli_warnmsg("Bytecode API: called with negative length!\n"); API_MISUSE(); return -1; } if (!ctx->outfd) { ctx->tempfile = cli_gentemp(cctx ? cctx->engine->tmpdir : NULL); if (!ctx->tempfile) { cli_dbgmsg("Bytecode API: Unable to allocate memory for tempfile\n"); cli_event_error_oom(EV, 0); return -1; } ctx->outfd = open(ctx->tempfile, O_RDWR|O_CREAT|O_EXCL|O_TRUNC|O_BINARY, 0600); if (ctx->outfd == -1) { ctx->outfd = 0; cli_warnmsg("Bytecode API: Can't create file %s: %s\n", ctx->tempfile, cli_strerror(errno, err, sizeof(err))); cli_event_error_str(EV, "cli_bcapi_write: Can't create temporary file"); free(ctx->tempfile); return -1; } cli_dbgmsg("bytecode opened new tempfile: %s\n", ctx->tempfile); } cli_event_fastdata(ctx->bc_events, BCEV_WRITE, data, len); if (cli_checklimits("bytecode api", cctx, ctx->written + len, 0, 0)) return -1; res = cli_writen(ctx->outfd, data, len); if (res > 0) ctx->written += res; if (res == -1) { cli_warnmsg("Bytecode API: write failed: %s\n", cli_strerror(errno, err, sizeof(err))); cli_event_error_str(EV, "cli_bcapi_write: write failed"); } return res; }
size_t clamav_stats_get_size(void *cbdata) { cli_intel_t *intel; cli_flagged_sample_t *sample; size_t sz, i; int err; intel = (cli_intel_t *)cbdata; if (!(intel)) return 0; sz = sizeof(cli_intel_t); #ifdef CL_THREAD_SAFE err = pthread_mutex_lock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_get_size: locking mutex failed (err: %d): %s\n", err, strerror(err)); return sz; } #endif for (sample = intel->samples; sample != NULL; sample = sample->next) { sz += sizeof(cli_flagged_sample_t); if ((sample->virus_name)) { for (i=0; sample->virus_name[i] != NULL; i++) sz += strlen(sample->virus_name[i]); sz += sizeof(char **) * i; } } #ifdef CL_THREAD_SAFE err = pthread_mutex_unlock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_get_size: unlocking mutex failed (err: %d): %s\n", err, strerror(err)); } #endif return sz; }
void clamav_stats_flush(struct cl_engine *engine, void *cbdata) { cli_intel_t *intel; cli_flagged_sample_t *sample, *next; int err; if (!(cbdata) || !(engine)) return; intel = (cli_intel_t *)cbdata; #ifdef CL_THREAD_SAFE err = pthread_mutex_lock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_flush: locking mutex failed (err: %d): %s\n", err, strerror(err)); return; } #endif for (sample=intel->samples; sample != NULL; sample = next) { next = sample->next; free_sample(sample); } intel->samples = NULL; intel->nsamples = 0; if (intel->hostid) { free(intel->hostid); intel->hostid = NULL; } #ifdef CL_THREAD_SAFE err = pthread_mutex_unlock(&(intel->mutex)); if (err) cli_warnmsg("clamav_stats_flush: unlocking mutex failed (err: %d): %s\n", err, strerror(err)); #endif }
void clamav_stats_remove_sample(const char *virname, const unsigned char *md5, size_t size, void *cbdata) { cli_intel_t *intel; cli_flagged_sample_t *sample; int err; intel = (cli_intel_t *)cbdata; if (!(intel)) return; #ifdef CL_THREAD_SAFE err = pthread_mutex_lock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_remove_sample: locking mutex failed (err: %d): %s\n", err, strerror(err)); return; } #endif while ((sample = find_sample(intel, virname, md5, size, NULL))) { if (sample->prev) sample->prev->next = sample->next; if (sample->next) sample->next->prev = sample->prev; if (sample == intel->samples) intel->samples = sample->next; free_sample(sample); intel->nsamples--; } #ifdef CL_THREAD_SAFE err = pthread_mutex_unlock(&(intel->mutex)); if (err) { cli_warnmsg("clamav_stats_remove_sample: unlocking mutex failed (err: %d): %s\n", err, strerror(err)); } #endif }
static int cab_unstore(struct cab_file *file, int bytes) { int todo, bread; unsigned char buff[4096]; if(bytes < 0) { cli_warnmsg("cab_unstore: bytes < 0\n"); return CL_EFORMAT; } todo = MIN((unsigned int) bytes, file->max_size); while(1) { if((unsigned int) todo <= sizeof(buff)) bread = todo; else bread = sizeof(buff); if((bread = cab_read(file, buff, bread)) == -1) { cli_dbgmsg("cab_unstore: cab_read failed for descriptor %d\n", file->fd); return file->error; } else if(cli_writen(file->ofd, buff, bread) != bread) { cli_warnmsg("cab_unstore: Can't write %d bytes to descriptor %d\n", bread, file->ofd); return CL_EWRITE; } todo -= bread; if(!bread || todo <= 0) break; } return CL_SUCCESS; }
/*
 * Stub used when LLVM support is not built in: verify no bytecode has
 * already been handed to the interpreter, then report that JIT
 * preparation is unavailable.  Always returns CL_EBYTECODE.
 */
int cli_bytecode_prepare_jit(struct cli_all_bc *bcs)
{
    unsigned idx;

    for (idx = 0; idx < bcs->count; idx++) {
        if (bcs->all_bcs[idx].state == bc_skip)
            continue;
        /* startup bytecodes are exempt from the loaded-state requirement */
        if (bcs->all_bcs[idx].state != bc_loaded &&
            bcs->all_bcs[idx].kind != BC_STARTUP) {
            cli_warnmsg("Cannot prepare for JIT, because it has already been converted to interpreter\n");
            return CL_EBYTECODE;
        }
    }
    cli_dbgmsg("Cannot prepare for JIT, LLVM is not compiled or not linked\n");
    return CL_EBYTECODE;
}
/*
 * Drop one lock reference on a page.  When the final lock is released
 * the page stays resident but starts aging (its counter becomes the
 * maximum age value).  Unlocking an unlocked or non-paged page is an
 * error and only logged.
 */
static void fmap_unneed_page(fmap_t *m, unsigned int page)
{
    uint32_t state = fmap_bitmap[page];

    if ((state & (FM_MASK_PAGED | FM_MASK_LOCKED)) != (FM_MASK_PAGED | FM_MASK_LOCKED)) {
        cli_warnmsg("fmap_unneed: unneed on a unlocked page\n");
        return;
    }

    /* page is paged and locked: check lock count */
    state &= FM_MASK_COUNT;
    if (state > 1) {
        /* locked more than once: dec lock count */
        fmap_bitmap[page]--;
    } else if (state == 1) {
        /* only one lock left: unlock and begin aging */
        fmap_bitmap[page] = FM_MASK_COUNT | FM_MASK_PAGED;
    } else {
        cli_errmsg("fmap_unneed: inconsistent map state\n");
    }
    return;
}
/*
 * Compare event `id` across two event contexts.  Returns 0 when the
 * events are identical (type, multiplicity, count, and every recorded
 * value), 1 otherwise.  Differences are logged at debug level.
 */
int cli_event_diff(cli_events_t *ctx1, cli_events_t *ctx2, unsigned id)
{
    int diff = 0;
    struct cli_event *ev1, *ev2;
    ev1 = get_event(ctx1, id);
    ev2 = get_event(ctx2, id);
    if (!ev1 || !ev2)
        return 1;
    if (ev1->type != ev2->type ||
        ev1->multiple != ev2->multiple ||
        ev1->name != ev2->name) {
        /* fixed: message was missing its trailing newline */
        cli_warnmsg("cli_event_diff: comparing incompatible events\n");
        return 1;
    }
    if (ev1->count != ev2->count) {
        cli_dbgmsg("diff: %s count %u vs %u\n", ev1->name, ev1->count, ev2->count);
        return 1;
    }
    diff = 0;
    if (ev1->multiple == multiple_chain && ev1->type != ev_data) {
        unsigned i;
        /* element-wise compare of the recorded chains */
        for (i=0;i<ev1->count;i++) {
            unsigned di = ev_diff(ev1->type, &ev1->u.v_chain[i], &ev2->u.v_chain[i], ev1->count);
            if (di) {
                if (!diff)
                    cli_dbgmsg("diff: %s\n", ev1->name);
                ev_debug(ev1->type, &ev1->u.v_chain[i], i);
                ev_debug(ev2->type, &ev2->u.v_chain[i], i);
            }
            diff += di;
        }
    } else {
        diff = ev_diff(ev1->type, &ev1->u, &ev2->u, ev1->count);
        if (diff) {
            cli_dbgmsg("diff: %s\n", ev1->name);
            ev_debug(ev1->type, &ev1->u, ev1->count);
            ev_debug(ev2->type, &ev2->u, ev2->count);
        }
    }
    if (!diff)
        return 0;
    return 1;
}
extern cl_fmap_t *cl_fmap_open_memory(const void *start, size_t len) { int pgsz = cli_getpagesize(); cl_fmap_t *m = cli_calloc(1, sizeof(*m)); if (!m) { cli_warnmsg("fmap: map allocation failed\n"); return NULL; } m->data = start; m->len = len; m->real_len = len; m->pgsz = pgsz; m->pages = fmap_align_items(len, pgsz); m->unmap = unmap_malloc; m->need = mem_need; m->need_offstr = mem_need_offstr; m->gets = mem_gets; m->unneed_off = mem_unneed_off; return m; }