/*
 * Read a literal run of `len` bytes from the compressed stream, write it to
 * fd_out (and to stdout when out_is_pipe), and fold it into the running CRC.
 *
 * Returns the number of bytes emitted (always `len`; any failure is fatal()).
 *
 * Fix vs. previous version: the stdout drain loop read `r` after the loop
 * even when the loop body never ran (uninitialized read), and treated a
 * short write (write() returning 0) as silent success. Every short or
 * failed write is now fatal, and no uninitialized variable is consulted.
 */
static int unzip_literal(void *ss, int len, int fd_out, uint32 *cksum, int out_is_pipe)
{
    uchar *buf;

    buf = malloc(len);
    if (!buf) {
        fatal("Failed to allocate literal buffer of size %d\n", len);
    }

    read_stream(ss, 1, buf, len);

    if (write(fd_out, buf, len) != len) {
        fatal("Failed to write literal buffer of size %d\n", len);
    }

    if (out_is_pipe) {
        ssize_t w = 0;
        while (w < len) {
            ssize_t r = write(STDOUT_FILENO, buf + w, len - w);
            /* r == 0 would loop forever; treat it as an error too. */
            if (r <= 0) {
                fatal("Failed to write literal buffer of size %d\n", len);
            }
            w += r;
        }
    }

    *cksum = crc32_buffer(buf, len, *cksum);

    free(buf);
    return len;
}
/*
 * Expand a match of `len` bytes: read a 32-bit back-reference offset from
 * the stream, seek that far back in the history file, and copy the data
 * forward in chunks of at most `offset` bytes (so overlapping matches
 * replicate correctly). Output goes to fd_out (and stdout when
 * out_is_pipe); the bytes are folded into the running CRC.
 *
 * Returns the total number of bytes emitted; any failure is fatal().
 *
 * Fixes vs. previous version:
 *  - offset == 0 made n = MIN(len, 0) = 0 and the loop never terminated;
 *    it is now rejected up front.
 *  - the stdout drain loop read `r` uninitialized when its body never ran,
 *    and its error message said "literal buffer" (copy-paste).
 *  - cur_pos (an off_t) was passed to a %d format specifier (undefined
 *    behavior); it is now cast to long long and printed with %lld.
 *  - the chunk buffer was malloc'd/free'd on every loop iteration; it is
 *    now allocated once (the first chunk is the largest).
 */
static int unzip_match(void *ss, int len, int fd_out, int fd_hist, uint32 *cksum, int out_is_pipe)
{
    unsigned offset;
    int total = 0;
    int bufsize;
    uchar *buf;
    off_t cur_pos = lseek(fd_out, 0, SEEK_CUR);

    offset = read_u32(ss, 0);
    if (offset == 0) {
        fatal("Invalid zero offset in unzip_match\n");
    }

    if (lseek(fd_hist, cur_pos - offset, SEEK_SET) == (off_t)-1) {
        fatal("Seek failed by %d from %lld on history file in unzip_match - %s\n",
              offset, (long long)cur_pos, strerror(errno));
    }

    /* First chunk is the largest: later iterations only shrink len. */
    bufsize = MIN(len, (int)offset);
    buf = malloc(bufsize);
    if (!buf) {
        fatal("Failed to allocate %d bytes in unzip_match\n", bufsize);
    }

    while (len) {
        int n = MIN(len, (int)offset);

        if (read(fd_hist, buf, n) != n) {
            fatal("Failed to read %d bytes in unzip_match\n", n);
        }

        if (write(fd_out, buf, n) != n) {
            fatal("Failed to write %d bytes in unzip_match\n", n);
        }

        if (out_is_pipe) {
            ssize_t w = 0;
            while (w < n) {
                ssize_t r = write(STDOUT_FILENO, buf + w, n - w);
                /* r == 0 would loop forever; treat it as an error too. */
                if (r <= 0) {
                    fatal("Failed to write match buffer of size %d\n", n);
                }
                w += r;
            }
        }

        *cksum = crc32_buffer(buf, n, *cksum);

        len -= n;
        total += n;
    }

    free(buf);
    return total;
}
/*
 * Build a human-readable identifier of the form "<name> <crc32-in-hex>"
 * from a base name and a binary blob. A NULL name falls back to "???".
 *
 * Returns a freshly ssh_xmalloc'd NUL-terminated string; the caller owns
 * it and must free it.
 */
char *ssh_generate_name_from_buffer(const char *name, const unsigned char *blob, size_t bloblen)
{
    size_t name_len;
    SshUInt32 checksum;
    unsigned char *result;

    if (name == NULL)
        name = "???";

    name_len = strlen(name);
    checksum = crc32_buffer(blob, bloblen);

    /* name + ' ' + 8 hex digits + NUL == name_len + 10 bytes. */
    result = ssh_xmalloc(name_len + 10);
    ssh_ustrncpy(result, ssh_custr(name), name_len);
    result[name_len] = ' ';
    ssh_snprintf(result + name_len + 1, 9, "%08lx", (unsigned long)checksum);

    return (char *)result;
}
/*
 * Parse one <Topic> configuration block: create a kafka producer handle
 * from `conf` (whose ownership passes to librdkafka), build a topic
 * configuration from the child options, and register a write callback
 * named "write_kafka/<topic>". On any error all partially-built state is
 * torn down via the errout path.
 *
 * Fixes vs. previous version:
 *  - the "Property" branch read the VALUE from values[0] (the key) instead
 *    of values[1], so every property was set to its own key string;
 *  - the "GraphiteEscapeChar" branch dereferenced tmp_buff without
 *    checking that cf_util_get_string() succeeded (NULL dereference).
 */
static void kafka_config_topic(rd_kafka_conf_t *conf, oconfig_item_t *ci) /* {{{ */
{
    int status;
    int i;
    struct kafka_topic_context *tctx;
    char *key;
    char *val;
    char callback_name[DATA_MAX_NAME_LEN];
    char errbuf[1024];
    user_data_t ud;
    oconfig_item_t *child;
    rd_kafka_conf_res_t ret;

    if ((tctx = calloc(1, sizeof(*tctx))) == NULL) {
        ERROR("write_kafka plugin: calloc failed.");
        return;
    }

    tctx->escape_char = '.';
    tctx->store_rates = 1;

    rd_kafka_conf_set_log_cb(conf, kafka_log);
    if ((tctx->kafka = rd_kafka_new(RD_KAFKA_PRODUCER, conf,
                                    errbuf, sizeof(errbuf))) == NULL) {
        sfree(tctx);
        ERROR("write_kafka plugin: cannot create kafka handle.");
        return;
    }
    /* rd_kafka_new() took ownership of conf; never destroy it ourselves. */
    conf = NULL;

    if ((tctx->conf = rd_kafka_topic_conf_new()) == NULL) {
        rd_kafka_destroy(tctx->kafka);
        sfree(tctx);
        ERROR("write_kafka plugin: cannot create topic configuration.");
        return;
    }

    if (ci->values_num != 1) {
        WARNING("kafka topic name needed.");
        goto errout;
    }

    if (ci->values[0].type != OCONFIG_TYPE_STRING) {
        WARNING("kafka topic needs a string argument.");
        goto errout;
    }

    if ((tctx->topic_name = strdup(ci->values[0].value.string)) == NULL) {
        ERROR("write_kafka plugin: cannot copy topic name.");
        goto errout;
    }

    for (i = 0; i < ci->children_num; i++) {
        /*
         * The code here could be simplified but makes room
         * for easy adding of new options later on.
         */
        child = &ci->children[i];
        status = 0;

        if (strcasecmp("Property", child->key) == 0) {
            if (child->values_num != 2) {
                WARNING("kafka properties need both a key and a value.");
                goto errout;
            }
            if (child->values[0].type != OCONFIG_TYPE_STRING ||
                child->values[1].type != OCONFIG_TYPE_STRING) {
                WARNING("kafka properties needs string arguments.");
                goto errout;
            }
            key = child->values[0].value.string;
            /* FIX: the value is the second argument, not the first. */
            val = child->values[1].value.string;
            ret = rd_kafka_topic_conf_set(tctx->conf, key, val,
                                          errbuf, sizeof(errbuf));
            if (ret != RD_KAFKA_CONF_OK) {
                WARNING("cannot set kafka topic property %s to %s: %s.",
                        key, val, errbuf);
                goto errout;
            }
        } else if (strcasecmp("Key", child->key) == 0) {
            char *tmp_buf = NULL;
            status = cf_util_get_string(child, &tmp_buf);
            if (status != 0) {
                WARNING("write_kafka plugin: invalid key supplied");
                break;
            }
            /* "Random" means: no fixed key, use random partitioning. */
            if (strcasecmp(tmp_buf, "Random") != 0) {
                tctx->has_key = 1;
                tctx->key = crc32_buffer((u_char *)tmp_buf, strlen(tmp_buf));
            }
            sfree(tmp_buf);
        } else if (strcasecmp("Format", child->key) == 0) {
            status = cf_util_get_string(child, &key);
            if (status != 0)
                goto errout;
            assert(key != NULL);

            if (strcasecmp(key, "Command") == 0) {
                tctx->format = KAFKA_FORMAT_COMMAND;
            } else if (strcasecmp(key, "Graphite") == 0) {
                tctx->format = KAFKA_FORMAT_GRAPHITE;
            } else if (strcasecmp(key, "Json") == 0) {
                tctx->format = KAFKA_FORMAT_JSON;
            } else {
                WARNING("write_kafka plugin: Invalid format string: %s", key);
            }
            sfree(key);
        } else if (strcasecmp("StoreRates", child->key) == 0) {
            status = cf_util_get_boolean(child, &tctx->store_rates);
            (void)cf_util_get_flag(child, &tctx->graphite_flags,
                                   GRAPHITE_STORE_RATES);
        } else if (strcasecmp("GraphiteSeparateInstances", child->key) == 0) {
            status = cf_util_get_flag(child, &tctx->graphite_flags,
                                      GRAPHITE_SEPARATE_INSTANCES);
        } else if (strcasecmp("GraphiteAlwaysAppendDS", child->key) == 0) {
            status = cf_util_get_flag(child, &tctx->graphite_flags,
                                      GRAPHITE_ALWAYS_APPEND_DS);
        } else if (strcasecmp("GraphitePrefix", child->key) == 0) {
            status = cf_util_get_string(child, &tctx->prefix);
        } else if (strcasecmp("GraphitePostfix", child->key) == 0) {
            status = cf_util_get_string(child, &tctx->postfix);
        } else if (strcasecmp("GraphiteEscapeChar", child->key) == 0) {
            char *tmp_buff = NULL;
            status = cf_util_get_string(child, &tmp_buff);
            /* FIX: only touch tmp_buff when the read succeeded. */
            if (status == 0) {
                if (strlen(tmp_buff) > 1)
                    WARNING("write_kafka plugin: The option \"GraphiteEscapeChar\" handles "
                            "only one character. Others will be ignored.");
                tctx->escape_char = tmp_buff[0];
            }
            sfree(tmp_buff);
        } else {
            WARNING("write_kafka plugin: Invalid directive: %s.", child->key);
        }

        if (status != 0)
            break;
    }

    rd_kafka_topic_conf_set_partitioner_cb(tctx->conf, kafka_partition);
    rd_kafka_topic_conf_set_opaque(tctx->conf, tctx);

    if ((tctx->topic = rd_kafka_topic_new(tctx->kafka, tctx->topic_name,
                                          tctx->conf)) == NULL) {
        ERROR("write_kafka plugin: cannot create topic.");
        goto errout;
    }
    /* Ownership of the topic conf passed to rd_kafka_topic_new(). */
    tctx->conf = NULL;

    ssnprintf(callback_name, sizeof(callback_name),
              "write_kafka/%s", tctx->topic_name);

    ud.data = tctx;
    ud.free_func = kafka_topic_context_free;

    status = plugin_register_write(callback_name, kafka_write, &ud);
    if (status != 0) {
        WARNING("write_kafka plugin: plugin_register_write (\"%s\") "
                "failed with status %i.", callback_name, status);
        goto errout;
    }
    return;

errout:
    if (conf != NULL)
        rd_kafka_conf_destroy(conf);
    if (tctx->kafka != NULL)
        rd_kafka_destroy(tctx->kafka);
    if (tctx->topic != NULL)
        rd_kafka_topic_destroy(tctx->topic);
    if (tctx->topic_name != NULL)
        free(tctx->topic_name);
    if (tctx->conf != NULL)
        rd_kafka_topic_conf_destroy(tctx->conf);
    sfree(tctx);
} /* }}} int kafka_config_topic */
/*
 * Round-trip test for the LZSS codec: load `filename`, compress it,
 * decompress the result, and verify the CRC of the round-trip matches the
 * CRC of the original data.
 *
 * On success returns 1 and stores the original size in *psize and the
 * compressed size in *pcompsize; returns 0 on any failure.
 *
 * Fixes vs. previous version:
 *  - an empty input file made the ratio computation divide by zero;
 *    zero-byte files are now reported and rejected up front;
 *  - the decompressed-size return of lzss_decompress() was ignored, so a
 *    decompression error could go unnoticed until the CRC mismatch; it is
 *    now checked explicitly.
 */
int lzss_test(char* filename, unsigned int* psize, unsigned int* pcompsize)
{
    int ret;
    unsigned char* in = NULL;
    unsigned int insize;
    unsigned char* out = NULL;
    unsigned int outsize, outmaxsize;
    unsigned int crc0, crc1;

    print("File: %s\n", filename);

    if(!file_get(filename, &in, &insize))
        goto error;
    *psize = insize;

    /* Reject empty input: nothing to test, and the ratio below would
     * divide by zero. */
    if(insize == 0)
    {
        print("Empty file, nothing to compress\n");
        goto error;
    }

    crc32_init();
    crc0 = crc32_buffer(in, insize);
    print("crc: %08X\n", crc0);
    print("uncompressed size: %d bytes\n", insize);

    /* Worst case: incompressible data can expand; allow 2x headroom. */
    outmaxsize = 2 * insize;
    out = memory_alloc(outmaxsize);

    outsize = lzss_compress(in, insize, out, outmaxsize);
    if(outsize == (unsigned int)-1)
    {
        print("Compression error\n");
        goto error;
    }
    *pcompsize = outsize;
    print("compressed size: %d bytes\n", outsize);
    print("ratio: %d%%\n", (outsize * 100) / insize);

    /* Wipe the input buffer, then decompress into it and compare CRCs. */
    memory_set(in, insize, 0);
    outsize = lzss_decompress(out, outsize, in, insize);
    if(outsize != insize)
    {
        print("Decompression error\n");
        goto error;
    }

    crc1 = crc32_buffer(in, insize);
    if(crc0 != crc1)
    {
        print("CRCs do not match!\n");
        goto error;
    }

    //if(!file_put("out", in, outsize))
    //    goto error;

    print("Success.\n\n");
    ret = 1;

done:
    if(in)
        memory_free(in);
    if(out)
        memory_free(out);
    return ret;

error:
    ret = 0;
    goto done;
}