VALUE rfpset_slurp_array(VALUE self, VALUE filename) {
  gzFile in = gzopen(RSTRING_PTR(filename), "rb");
  if(in == NULL) return rb_fix_new(-1);

  VALUE array = rb_ary_new();

  blob* next_blob = blob_make(512);
  while((next_blob = blob_read(in, next_blob)) != NULL) {
    rb_ary_push(array, rb_str_new(next_blob->data, next_blob->size));
  }

  gzclose(in);
  free(next_blob);

  return array;
}
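/*
 * Sketch, not part of the original extension: one way a function with
 * this signature could be exposed to Ruby via the MRI C API.  The module
 * name "RFPSet", the method name "slurp_array", and the Init_ entry
 * point are assumptions for illustration only.  From Ruby the call would
 * then look like RFPSet.slurp_array("sets/foo.gz"), returning an array
 * of binary strings (or -1 if the file cannot be opened).
 */
#include <ruby.h>

void Init_rfpset(void)
{
    VALUE mod = rb_define_module("RFPSet");   /* assumed module name */
    rb_define_module_function(mod, "slurp_array",
            RUBY_METHOD_FUNC(rfpset_slurp_array), 1);
}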
static void doit(const char *src, const char *dst, size_t size_in,
                 const struct imgmin_options *opt)
{
    MagickWand *mw;
    unsigned char *blob_in = 0;

    blob_in = blob_read(src, &size_in);

    MagickWandGenesis();
    mw = NewMagickWand();

    if (MagickReadImageBlob(mw, blob_in, size_in) == MagickTrue)
    {
        optimize_image(mw, src, dst, size_in, blob_in, opt);
    }

    /* tear it down */
    DestroyMagickWand(mw);
    MagickWandTerminus();
}
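/*
 * blob_read() is not defined in this excerpt.  From the call above it is
 * assumed to slurp the whole file at `src` into a malloc'd buffer and
 * report the byte count through the size pointer.  A minimal sketch of a
 * helper with that contract (the name blob_read_sketch is hypothetical):
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned char *blob_read_sketch(const char *path, size_t *size)
{
    FILE *f = fopen(path, "rb");
    unsigned char *buf = NULL;
    long len;

    if (f == NULL)
        return NULL;
    /* size the file, allocate a buffer (+1 avoids malloc(0)), read it in */
    if (fseek(f, 0, SEEK_END) == 0 && (len = ftell(f)) >= 0 &&
        fseek(f, 0, SEEK_SET) == 0 &&
        (buf = malloc((size_t)len + 1)) != NULL &&
        fread(buf, 1, (size_t)len, f) == (size_t)len) {
        *size = (size_t)len;
    } else {
        free(buf);
        buf = NULL;
    }
    fclose(f);
    return buf;
}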
static int sha_fs_put(struct request *r)
{
	blk_SHA_CTX ctx;
	char tmp_path[MAX_FILE_PATH_LEN];
	unsigned char chunk[CHUNK_SIZE], *cp, *cp_end;
	int fd = -1;
	int status = 0;
	char buf[MSG_SIZE];

	/*
	 *  Open a temporary file in $sha_fs_root/tmp to accumulate the
	 *  blob read from the client.  The file looks like
	 *
	 *	[put|give]-time-pid-digest
	 */
	snprintf(tmp_path, sizeof tmp_path, "%s/%s-%d-%u-%s",
				boot_data.tmp_dir_path,
				r->verb,
				/*
				 *  Warning:
				 *	Casting time() to int is
				 *	incorrect!!
				 */
				(int)time((time_t *)0),
				getpid(),
				r->digest);
	/*
	 *  Open the file ... need O_LARGEFILE support!!
	 *  Need to catch EINTR!!!!
	 */
	fd = io_open(tmp_path, O_CREAT|O_EXCL|O_WRONLY|O_APPEND, S_IRUSR);
	if (fd < 0) {
		snprintf(buf, sizeof buf, "open(%s) failed: %s",
					tmp_path, strerror(errno));
		_panic(r, buf);
	}

	/*
	 *  Initialize digest of blob being scanned from the client.
	 */
	blk_SHA1_Init(&ctx);

	/*
	 *  An empty blob is always put.
	 *  Note: the caller has already ensured that no more data has
	 *  been written by the client, so no need to check r->scan_size.
	 */
	if (strcmp(r->digest, empty_ascii) == 0)
		goto digested;

	/*
	 *  Copy what we have already read into the first chunk buffer.
	 *
	 *  If we've read ahead more than we can chew,
	 *  then croak.  This should never happen.
	 */
	if (r->scan_size > 0) {

		//  Note: regress, sanity test ... remove later.
		if ((u8)r->scan_size != r->blob_size)
			_panic(r, "r->scan_size != r->blob_size");

		if (r->scan_size > (int)(sizeof chunk - 1)) {
			snprintf(buf, sizeof buf, "max=%lu",
					(long unsigned)(sizeof chunk - 1));
			_panic2(r, "scanned chunk too big", buf);
		}

		/*
		 *  See if the entire blob fits in the first read.
		 */
		if (eat_chunk(r, &ctx, fd, r->scan_buf, r->scan_size))
			goto digested;
	}
	cp = chunk;
	cp_end = &chunk[sizeof chunk];

	/*
	 *  Read more chunks until we see the blob.
	 */
again:
	while (cp < cp_end) {
		int nread = blob_read(r, cp, cp_end - cp);

		/*
		 *  Read error from client,
		 *  so zap the partial, invalid blob.
		 */
		if (nread < 0) {
			_error(r, "blob_read() failed");
			goto croak;
		}
		if (nread == 0) {
			_error(r, "blob_read() returns 0 before digest seen");
			goto croak;
		}
		switch (eat_chunk(r, &ctx, fd, cp, nread)) {
		case -1:
			_panic(r, "eat_chunk(local) failed");
		case 1:
			goto digested;
		}
		cp += nread;
	}
	cp = chunk;
	goto again;

digested:
	if (fd >= 0)
		_close(r, &fd);

	/*
	 *  Move the temp blob file to the final blob path.
	 */
	blob_path(r, r->digest);
	arbor_rename(tmp_path,
		((struct sha_fs_request *)r->open_data)->blob_path);
	goto cleanup;

croak:
	status = -1;

cleanup:
	if (fd > -1 && _close(r, &fd))
		_panic(r, "_close() failed");
	if (tmp_path[0] && _unlink(r, tmp_path, (int *)0))
		_panic(r, "_unlink() failed");
	return status;
}
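/*
 * eat_chunk() is not shown in this excerpt.  Its call sites above imply
 * the following contract: append the chunk to the temp file, fold it
 * into the running SHA1, and return 1 once the running digest matches
 * r->digest (blob complete), 0 to keep reading, -1 on error.  A hedged
 * sketch under those assumptions; the plain write() call and the local
 * hex table stand in for whatever helpers the real code uses.
 */
static int eat_chunk_sketch(struct request *r, blk_SHA_CTX *ctx, int fd,
			    unsigned char *buf, int size)
{
	static const char nib[] = "0123456789abcdef";
	blk_SHA_CTX tmp;
	unsigned char bin[20];
	char hex[41];
	int i;

	/* accumulate the chunk into the temp file and the running digest */
	if (write(fd, buf, size) != size)
		return -1;
	blk_SHA1_Update(ctx, buf, size);

	/* finalize a copy so the caller can keep feeding the real context */
	tmp = *ctx;
	blk_SHA1_Final(bin, &tmp);
	for (i = 0; i < 20; i++) {
		hex[i * 2]     = nib[bin[i] >> 4];
		hex[i * 2 + 1] = nib[bin[i] & 0xf];
	}
	hex[40] = '\0';

	/* 1 == the digest claimed by the client has been reached */
	return strcmp(hex, r->digest) == 0 ? 1 : 0;
}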
ham_status_t
blob_duplicate_erase(ham_db_t *db, ham_offset_t table_id,
        ham_size_t position, ham_u32_t flags,
        ham_offset_t *new_table_id)
{
    ham_status_t st;
    ham_record_t rec;
    ham_size_t i;
    dupe_table_t *table;
    ham_offset_t rid;
    ham_env_t *env = db_get_env(db);

    /* store the public record pointer, otherwise it's destroyed */
    ham_size_t rs=db_get_record_allocsize(db);
    void *rp=db_get_record_allocdata(db);
    db_set_record_allocdata(db, 0);
    db_set_record_allocsize(db, 0);

    memset(&rec, 0, sizeof(rec));

    if (new_table_id)
        *new_table_id=table_id;

    st=blob_read(db, table_id, &rec, 0);
    if (st)
        return (st);

    /* restore the public record pointer */
    db_set_record_allocsize(db, rs);
    db_set_record_allocdata(db, rp);

    table=(dupe_table_t *)rec.data;

    /*
     * if BLOB_FREE_ALL_DUPES is set *OR* if the last duplicate is deleted:
     * free the whole duplicate table
     */
    if (flags&BLOB_FREE_ALL_DUPES
            || (position==0 && dupe_table_get_count(table)==1)) {
        for (i=0; i<dupe_table_get_count(table); i++) {
            dupe_entry_t *e=dupe_table_get_entry(table, i);
            if (!(dupe_entry_get_flags(e)&(KEY_BLOB_SIZE_SMALL
                                        |KEY_BLOB_SIZE_TINY
                                        |KEY_BLOB_SIZE_EMPTY))) {
                st=blob_free(env, db, dupe_entry_get_rid(e), 0);
                if (st) {
                    allocator_free(env_get_allocator(env), table);
                    return (st);
                }
            }
        }
        st=blob_free(env, db, table_id, 0); /* [i_a] isn't this superfluous
                                             * (& dangerous), thanks to the
                                             * free_all_dupes loop above??? */
        allocator_free(env_get_allocator(env), table);
        if (st)
            return (st);

        if (new_table_id)
            *new_table_id=0;

        return (0);
    }
    else {
        ham_record_t rec={0};
        dupe_entry_t *e=dupe_table_get_entry(table, position);

        if (!(dupe_entry_get_flags(e)&(KEY_BLOB_SIZE_SMALL
                                    |KEY_BLOB_SIZE_TINY
                                    |KEY_BLOB_SIZE_EMPTY))) {
            st=blob_free(env, db, dupe_entry_get_rid(e), 0);
            if (st) {
                allocator_free(env_get_allocator(env), table);
                return (st);
            }
        }
        memmove(e, e+1,
            ((dupe_table_get_count(table)-position)-1)*sizeof(dupe_entry_t));
        dupe_table_set_count(table, dupe_table_get_count(table)-1);

        rec.data=(ham_u8_t *)table;
        rec.size=sizeof(dupe_table_t)
                    +(dupe_table_get_capacity(table)-1)*sizeof(dupe_entry_t);
        st=blob_overwrite(env, db, table_id, &rec, 0, &rid);
        if (st) {
            allocator_free(env_get_allocator(env), table);
            return (st);
        }
        if (new_table_id)
            *new_table_id=rid;
    }

    /*
     * return 0 as a rid if the table is empty
     */
    if (dupe_table_get_count(table)==0)
        if (new_table_id)
            *new_table_id=0;

    allocator_free(env_get_allocator(env), table);

    return (0);
}
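/*
 * Note on the rec.size arithmetic above, illustrated outside hamsterdb's
 * own headers (these sketch structs are assumptions, not the real
 * dupe_table_t/dupe_entry_t): the duplicate table is stored as one blob
 * holding a header plus `capacity` entries, with the first entry folded
 * into the header struct itself, hence the "capacity - 1" term.
 */
#include <stdio.h>

typedef struct { unsigned long long rid; unsigned flags; } entry_sketch_t;

typedef struct {
    unsigned count;            /* entries in use */
    unsigned capacity;         /* allocated slots */
    entry_sketch_t entry[1];   /* first slot lives inside the header */
} table_sketch_t;

int main(void)
{
    unsigned capacity = 8;
    size_t blob_size = sizeof(table_sketch_t)
                         + (capacity - 1) * sizeof(entry_sketch_t);
    printf("duplicate table blob: %zu bytes for %u slots\n",
           blob_size, capacity);
    return 0;
}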
VALUE blob_intersect_files(gzFile* files, int file_count) {
  VALUE result = rb_ary_new();
  if(file_count == 0) return result;

  int master_idx = 0;
  int ii = 0;

  blob* master_blob = blob_make(512);
  blob* next_blob = blob_make(512);

  // bootstrap
  master_blob = blob_read(files[0], master_blob);

  // until a file runs out of data
  while(1) {
    int all_match = 1;
    int end_of_file = 0;

    for(ii = 0; ii < file_count; ++ii) {
      if(ii == master_idx) continue;

      // read blobs from this file until they aren't less than the
      // master blob
      int compare_result = 0;
      while(1) {
        next_blob = blob_read(files[ii], next_blob);
        if(next_blob == NULL) {
          end_of_file = 1;
          break;
        } else {
          compare_result = blob_compare(&next_blob, &master_blob);
          if(compare_result >= 0) break;
        }
      }

      // if any file ever reaches the end while we're looking it means
      // that we've found the entire intersection
      if(end_of_file) {
        all_match = 0;
        break;
      }

      // if we ever get a non-zero compare result then that means the
      // current candidate is a failure and we have a new candidate to
      // try
      if(compare_result != 0) {
        all_match = 0;
        break;
      }
    }

    // finish bailing out on end of file
    if(end_of_file) break;

    // store the match if we had one
    if(all_match) {
      rb_ary_push(result, rb_str_new(master_blob->data, master_blob->size));
    } else {
      // if we didn't have a match then whichever blob failed first
      // becomes the new master and we try again
      blob* temp = master_blob;
      master_blob = next_blob;
      next_blob = temp;
      master_idx = ii;
    }
  }

  free(master_blob);
  free(next_blob);

  return result;
}
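/*
 * Sketch of the assumptions the intersection loop above makes about the
 * blob helpers (these definitions are illustrative, not the project's
 * actual ones): a blob exposes a byte buffer plus its length, and
 * blob_compare() gives a strcmp-style total order over blobs passed by
 * pointer-to-pointer, e.g. bytewise comparison with the length as the
 * tie-breaker.
 */
#include <stddef.h>
#include <string.h>

typedef struct {
    size_t capacity;   /* allocated bytes */
    size_t size;       /* bytes currently in use */
    char   data[];     /* payload */
} blob_sketch;

static int blob_compare_sketch(blob_sketch *const *a, blob_sketch *const *b)
{
    size_t n = (*a)->size < (*b)->size ? (*a)->size : (*b)->size;
    int c = memcmp((*a)->data, (*b)->data, n);

    if (c != 0)
        return c;
    /* equal prefixes: the shorter blob sorts first */
    return ((*a)->size > (*b)->size) - ((*a)->size < (*b)->size);
}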