int gridfs_store_buffer( gridfs *gfs, const char *data,
                         gridfs_offset length, const char *remotename,
                         const char *contenttype ) {

    char const *end = data + length;
    const char *data_ptr = data;
    bson_oid_t id;
    int chunkNumber = 0;
    int chunkLen;
    bson *oChunk;

    /* Large files assertion */
    assert( length <= 0xffffffff );

    /* Generate an oid for the file */
    bson_oid_gen( &id );

    /* Insert the file's data chunk by chunk */
    while ( data_ptr < end ) {
        chunkLen = DEFAULT_CHUNK_SIZE < ( unsigned int )( end - data_ptr ) ?
                   DEFAULT_CHUNK_SIZE : ( unsigned int )( end - data_ptr );
        oChunk = chunk_new( id, chunkNumber, data_ptr, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk );
        chunk_free( oChunk );
        chunkNumber++;
        data_ptr += chunkLen;
    }

    /* Insert the file's metadata */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
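/* A minimal usage sketch for gridfs_store_buffer() above. It assumes the
 * legacy mongo-c-driver API (mongo_client(), gridfs_init(), gridfs_destroy());
 * host, port, database, and file names are placeholders, and error handling
 * is abbreviated. */
#include "mongo.h"
#include "gridfs.h"
#include <string.h>

static int store_greeting( void ) {
    mongo conn;
    gridfs gfs;
    const char *data = "hello, gridfs";
    int status;

    if ( mongo_client( &conn, "127.0.0.1", 27017 ) != MONGO_OK )
        return MONGO_ERROR;
    gridfs_init( &conn, "testdb", "fs", &gfs );

    /* Store the buffer as a file named "greeting.txt" */
    status = gridfs_store_buffer( &gfs, data, strlen( data ),
                                  "greeting.txt", "text/plain" );

    gridfs_destroy( &gfs );
    mongo_destroy( &conn );
    return status;
}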
static PyObject *
hashsplit_read_chunk(PyObject *self, PyObject *args)
{
    PyObject *source_file = NULL;   /* gcc complains if these aren't initialized */
    PyObject *max_chunk_size = NULL;
    long MAX_CHUNK_SIZE;
    PyObject *result = NULL;
    Chunk *chunk;
    FILE *source;

    if (!PyArg_UnpackTuple(args, "read_chunk", 1, 2,
                           &source_file, &max_chunk_size))
        return NULL;
    if (!(source = PyFile_AsFile(source_file))) {
        PyErr_SetString(PyExc_TypeError, "Expected file or file descriptor");
        return NULL;
    }
    if (!max_chunk_size || max_chunk_size == Py_None) {
        MAX_CHUNK_SIZE = DEFAULT_MAX_CHUNK_SIZE;
    } else if ((MAX_CHUNK_SIZE = PyInt_AsLong(max_chunk_size)) == -1
               && PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError, "max_chunk_size should be an integer");
        return NULL;
    }
    if (MAX_CHUNK_SIZE < MIN_CHUNK_SIZE) {
        PyErr_SetString(PyExc_ValueError,
                        "max_chunk_size must be larger than MIN_CHUNK_SIZE");
        return NULL;
    }
    if (!(chunk = chunk_new(MAX_CHUNK_SIZE)))
        return PyErr_NoMemory();
    if (!read_chunk(source, chunk, MAX_CHUNK_SIZE)) {
        PyErr_SetString(PyExc_EOFError, "");
    } else {
        result = PyString_FromStringAndSize((char *)chunk->data, chunk->length);
    }
    chunk_delete(chunk);
    return result;
}
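/* Sketch of how hashsplit_read_chunk() above would be exposed to Python 2 via
 * a method table in the same file; the module name _hashsplit is an
 * assumption, not taken from the source. */
static PyMethodDef hashsplit_methods[] = {
    { "read_chunk", hashsplit_read_chunk, METH_VARARGS,
      "Read one chunk from a file object, optionally bounded by max_chunk_size." },
    { NULL, NULL, 0, NULL }  /* sentinel */
};

PyMODINIT_FUNC
init_hashsplit(void)
{
    Py_InitModule("_hashsplit", hashsplit_methods);
}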
static chunk_t *
b16_enc(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(inp->len * 2 + 1);
  base16_encode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len);
  return ch;
}
/* Add new agent.  Send agent ID + number of workers + router map */
static void add_agent(int fd, bool isclient) {
    unsigned agent = next_agent++;
    if (agent >= worker_cnt + maxclients) {
        /* Exceeded client limit */
        chunk_ptr msg = msg_new_nack();
        if (chunk_write(fd, msg)) {
#if RPT >= 1
            report(1, "Sent nack to potential client due to client limit being exceeded.  Fd = %d", fd);
#endif
        } else {
#if RPT >= 3
            report(3, "Couldn't send nack to potential client.  Fd = %d", fd);
#endif
        }
        chunk_free(msg);
        return;
    }
    /* Need to break into sequence of messages according to max. chunk length */
    chunk_ptr msg = NULL;
    size_t bcount = 0;
    size_t ncount = router_addr_set->nelements;
    set_iterstart(router_addr_set);
    word_t id;
    bool ok = true;
    while (ok && set_iternext(router_addr_set, &id)) {
        if (bcount == 0) {
            /* Start new block */
            size_t blen = ncount;
            if (blen > MAX_IDS)
                blen = MAX_IDS;
            msg = chunk_new(blen+1);
        }
        word_t wd = id << 16;
        chunk_insert_word(msg, wd, bcount+1);
        bcount++;
        if (bcount == MAX_IDS) {
            /* This block is filled */
            size_t h1 = ((word_t) agent << 48) | ((word_t) ncount << 32) |
                        ((word_t) worker_cnt << 16) | MSG_ACK_AGENT;
            chunk_insert_word(msg, h1, 0);
            ok = chunk_write(fd, msg);
            chunk_free(msg);
            ncount -= bcount;
            bcount = 0;
        }
    }
    if (ok && ncount > 0) {
        size_t h1 = ((word_t) agent << 48) | ((word_t) ncount << 32) |
                    ((word_t) worker_cnt << 16) | MSG_ACK_AGENT;
        chunk_insert_word(msg, h1, 0);
        ok = chunk_write(fd, msg);
        chunk_free(msg);
        ncount -= bcount;
    }
#if RPT >= 3
    report(3, "Added agent %u with descriptor %d", agent, fd);
#endif
}
static chunk_t *
b32_enc(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(base32_encoded_size(inp->len));
  base32_encode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len);
  ch->len = strlen((char *) ch->buf);
  return ch;
}
/* Extract subchunk */
chunk_ptr chunk_get_chunk(chunk_ptr cp, size_t offset, size_t length) {
    chunk_ptr ncp = chunk_new(length);
    size_t i;
    for (i = 0; i < length; i++) {
        chunk_insert_word(ncp, chunk_get_word(cp, i+offset), i);
    }
    return ncp;
}
static chunk_t *
b64_enc(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(BASE64_BUFSIZE(inp->len));
  base64_encode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len, 0);
  ch->len = strlen((char *) ch->buf);
  return ch;
}
static void __chunkqueue_append_file(liChunkQueue *cq, GString *filename, off_t start,
                                     off_t length, int fd, gboolean is_temp) {
    liChunk *c = chunk_new();
    c->type = FILE_CHUNK;
    c->data.file.file = li_chunkfile_new(filename, fd, is_temp);
    c->data.file.start = start;
    c->data.file.length = length;

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += length;
    cq->bytes_in += length;
}
/* memory gets copied */
void li_chunkqueue_append_mem(liChunkQueue *cq, const void *mem, gssize len) {
    liChunk *c;
    if (!len) return;

    c = chunk_new();
    c->type = MEM_CHUNK;
    c->mem = g_byte_array_sized_new(len);
    g_byte_array_append(c->mem, mem, len);

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += c->mem->len;
    cq->bytes_in += c->mem->len;
    cqlimit_update(cq, c->mem->len);
}
static chunk_t *
b32_dec(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(inp->len);//XXXX
  int r = base32_decode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len);
  if (r >= 0) {
    ch->len = r;
  } else {
    /* chunk_free() is assumed to NULL its argument (a FREE_AND_NULL-style
     * macro), so a failed decode returns NULL rather than a dangling
     * pointer. */
    chunk_free(ch);
  }
  return ch;
}
static chunk_t *
b16_dec(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(CEIL_DIV(inp->len, 2));
  int r = base16_decode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len);
  if (r >= 0) {
    ch->len = r;
  } else {
    chunk_free(ch);
  }
  return ch;
}
static chunk_t *
b64_dec(const chunk_t *inp)
{
  chunk_t *ch = chunk_new(inp->len);//XXXX This could be shorter.
  int r = base64_decode((char *)ch->buf, ch->len, (char *)inp->buf, inp->len);
  if (r >= 0) {
    ch->len = r;
  } else {
    chunk_free(ch);
  }
  return ch;
}
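/* A round-trip sketch tying the encode/decode helpers above together: encode
 * a chunk, decode the result, and check the payload survives. Assumes the
 * same test harness's chunk_t (buf/len fields), chunk_new()/chunk_free(), and
 * the pointer-nulling chunk_free() convention noted in b32_dec(); memcmp
 * comes from <string.h>. */
static int
b64_roundtrip_ok(const chunk_t *inp)
{
  chunk_t *enc = b64_enc(inp);
  chunk_t *dec = b64_dec(enc);
  int ok = dec != NULL && dec->len == inp->len &&
           memcmp(dec->buf, inp->buf, inp->len) == 0;
  chunk_free(enc);
  if (dec)
    chunk_free(dec);
  return ok;
}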
/* Replicate a chunk */
chunk_ptr chunk_clone(chunk_ptr cp) {
    if (cp == NULL && chunk_check_level >= 2) {
        chunk_error("Null Pointer", cp);
        return NULL;
    }
    chunk_ptr ncp = chunk_new(cp->length);
    ncp->length = cp->length;
    size_t i;
    for (i = 0; i < cp->length; i++) {
        ncp->words[i] = cp->words[i];
    }
    return ncp;
}
/* pass ownership of mem to chunkqueue, do not free/modify it afterwards
 * you may modify the data (but not the length) if you are sure it isn't sent before.
 * if the length is 0, mem is destroyed immediately */
void li_chunkqueue_append_bytearr(liChunkQueue *cq, GByteArray *mem) {
    liChunk *c;
    if (!mem->len) {
        g_byte_array_free(mem, TRUE);
        return;
    }

    c = chunk_new();
    c->type = MEM_CHUNK;
    c->mem = mem;

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += mem->len;
    cq->bytes_in += mem->len;
    cqlimit_update(cq, mem->len);
}
/* pass ownership of str to chunkqueue, do not free/modify it afterwards
 * you may modify the data (but not the length) if you are sure it isn't sent before.
 * if the length is 0, str is destroyed immediately */
void li_chunkqueue_append_string(liChunkQueue *cq, GString *str) {
    liChunk *c;
    if (!str->len) {
        g_string_free(str, TRUE);
        return;
    }

    c = chunk_new();
    c->type = STRING_CHUNK;
    c->data.str = str;

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += str->len;
    cq->bytes_in += str->len;
    cqlimit_update(cq, str->len);
}
/* increases reference for cf (if length > 0) */
void li_chunkqueue_append_chunkfile(liChunkQueue *cq, liChunkFile *cf, off_t start, off_t length) {
    if (length) {
        liChunk *c = chunk_new();
        li_chunkfile_acquire(cf);

        c->type = FILE_CHUNK;
        c->data.file.file = cf;
        c->data.file.start = start;
        c->data.file.length = length;

        g_queue_push_tail_link(&cq->queue, &c->cq_link);
        cq->length += length;
        cq->bytes_in += length;
    }
}
int gridfs_store_file( gridfs *gfs, const char *filename,
                       const char *remotename, const char *contenttype ) {

    char buffer[DEFAULT_CHUNK_SIZE];
    FILE *fd;
    bson_oid_t id;
    int chunkNumber = 0;
    gridfs_offset length = 0;
    gridfs_offset chunkLen = 0;
    bson *oChunk;

    /* Open the file or the correct stream */
    if ( strcmp( filename, "-" ) == 0 )
        fd = stdin;
    else {
        fd = fopen( filename, "rb" );
        if ( fd == NULL )
            return MONGO_ERROR;
    }

    /* Generate an oid for the file */
    bson_oid_gen( &id );

    /* Insert the file chunk by chunk */
    chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    do {
        oChunk = chunk_new( id, chunkNumber, buffer, chunkLen );
        mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL );
        chunk_free( oChunk );
        length += chunkLen;
        chunkNumber++;
        chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd );
    } while ( chunkLen != 0 );

    /* Close the file stream */
    if ( fd != stdin )
        fclose( fd );

    /* Large files assertion */
    /* assert(length <= 0xffffffff); */

    /* Optional remote name */
    if ( remotename == NULL || *remotename == '\0' ) {
        remotename = filename;
    }

    /* Insert the file's metadata */
    return gridfs_insert_file( gfs, remotename, id, length, contenttype );
}
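/* Usage sketch for gridfs_store_file() above: "-" streams from stdin, any
 * other name is opened as a local path, and a NULL/empty remotename falls
 * back to the local filename. File names are placeholders, and the gfs handle
 * is assumed initialized as in the gridfs_store_buffer() sketch earlier. */
static int store_two_files( gridfs *gfs ) {
    int s1 = gridfs_store_file( gfs, "report.pdf", "reports/q1.pdf",
                                "application/pdf" );
    int s2 = gridfs_store_file( gfs, "-", "from-stdin.txt", "text/plain" );
    return ( s1 == MONGO_OK && s2 == MONGO_OK ) ? MONGO_OK : MONGO_ERROR;
}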
/* pass ownership of one buffer reference to chunkqueue
 * if the buffer is empty (used == 0), the reference is released immediately */
void li_chunkqueue_append_buffer(liChunkQueue *cq, liBuffer *buffer) {
    liChunk *c;
    if (!buffer->used) {
        li_buffer_release(buffer);
        return;
    }

    c = chunk_new();
    c->type = BUFFER_CHUNK;
    c->data.buffer.buffer = buffer;
    c->data.buffer.offset = 0;
    c->data.buffer.length = buffer->used;

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += buffer->used;
    cq->bytes_in += buffer->used;
    cqlimit_update(cq, buffer->used);
}
void li_chunkqueue_append_buffer2(liChunkQueue *cq, liBuffer *buffer, gsize offset, gsize length) {
    liChunk *c;
    if (length == 0) {
        li_buffer_release(buffer);
        return;
    }
    assert(offset + length <= buffer->used);

    c = chunk_new();
    c->type = BUFFER_CHUNK;
    c->data.buffer.buffer = buffer;
    c->data.buffer.offset = offset;
    c->data.buffer.length = length;

    g_queue_push_tail_link(&cq->queue, &c->cq_link);
    cq->length += length;
    cq->bytes_in += length;
    cqlimit_update(cq, length);
}
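/* A sketch contrasting the append variants above: li_chunkqueue_append_mem()
 * copies the bytes, while the _string/_bytearr/_buffer variants take
 * ownership of their argument. li_chunkqueue_new() is assumed to be the
 * queue constructor; the exact name may differ in the actual lighttpd2 tree. */
static liChunkQueue* build_queue(void) {
    liChunkQueue *cq = li_chunkqueue_new();
    GString *owned = g_string_new("queue owns this after the call");

    li_chunkqueue_append_mem(cq, "copied", 6); /* queue keeps its own copy */
    li_chunkqueue_append_string(cq, owned);    /* do not free/modify owned afterwards */
    return cq;
}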
/* Convert a string into a chunk.
   Limited to strings of length <= WORD_BYTES * CHUNK_MAX_LENGTH */
chunk_ptr str2chunk(char *s) {
    char buf[WORD_BYTES * CHUNK_MAX_LENGTH];
    size_t len = (strnlen(s, WORD_BYTES * CHUNK_MAX_LENGTH) + WORD_BYTES - 1) / WORD_BYTES;
    chunk_ptr cp = chunk_new(len);
    size_t cidx, bidx;
    size_t sidx = 0;
    for (cidx = 0; cidx < len; cidx++) {
        /* Pack WORD_BYTES characters into one word, zero-padding the tail */
        for (bidx = 0; bidx < WORD_BYTES && s[sidx]; bidx++) {
            buf[bidx] = s[sidx++];
        }
        for (; bidx < WORD_BYTES; bidx++) {
            buf[bidx] = '\0';
        }
        word_t wd = *(word_t *) buf;
        chunk_insert_word(cp, wd, cidx);
    }
    return cp;
}
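/* Illustrative composition of the chunk helpers above: pack a string into a
 * chunk, clone it, then carve out a one-word subchunk. chunk_free() is the
 * single-argument destructor this codebase uses elsewhere (see add_agent()). */
static void chunk_helpers_demo(void) {
    chunk_ptr cp  = str2chunk("hello, chunks");
    chunk_ptr dup = chunk_clone(cp);             /* independent word-for-word copy */
    chunk_ptr sub = chunk_get_chunk(dup, 0, 1);  /* first word only */
    chunk_free(sub);
    chunk_free(dup);
    chunk_free(cp);
}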
bson gridfile_writer_done( gridfile *gfile ) {

    /* write any remaining pending chunk data.
     * pending data will always take up less than one chunk */
    bson *oChunk;
    if( gfile->pending_data ) {
        oChunk = chunk_new( gfile->id, gfile->chunk_num,
                            gfile->pending_data, gfile->pending_len );
        mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk );
        chunk_free( oChunk );
        free( gfile->pending_data );
        gfile->length += gfile->pending_len;
    }

    /* insert into files collection */
    return gridfs_insert_file( gfile->gfs, gfile->remote_name, gfile->id,
                               gfile->length, gfile->content_type );
}
static void* chunk_alloc_mem(rb_tracelog_chunk_t* chunk, size_t size)
{
    size_t space_left;
    void* ret;

    assert(!chunk->next);
    assert(chunk->capacity >= chunk->len);

    space_left = chunk->capacity - chunk->len;
    if (space_left < size) {
        /* Current chunk is full: chain a fresh one and retry there */
        rb_tracelog_chunk_t *new_chunk = chunk_new();
        chunk->next = new_chunk;
        g_tracelog->tail_chunk = new_chunk;
        return chunk_alloc_mem(new_chunk, size);
    }

    chunk->len += size;
    ret = chunk->data_head;
    chunk->data_head = (void*)((uintptr_t)chunk->data_head + size);
    return ret;
}
void tracelog_init(void)
{
    rb_tracelog_event_t* event;

    g_tracelog = ALLOC(rb_tracelog_t);
    g_tracelog->head_chunk = chunk_new();
    g_tracelog->tail_chunk = g_tracelog->head_chunk;

    event = tracelog_alloc_mem(sizeof(rb_tracelog_event_t));
    event->name = "TracingStartSentinel";
    event->category = "TraceLog";
    event->args = NULL;
    event->phase = 'I';
    event->timestamp = clock_abs_ns();
    event->next = NULL;

    g_tracelog->head_event = event;
    g_tracelog->tail_event = event;
    g_tracelog->head_category = NULL;
}
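/* Sketch of appending a second event with the bump allocator above, mirroring
 * the sentinel written by tracelog_init() ('I' = instant phase, clock_abs_ns()
 * is the same helper it calls). Linking via tail_event->next is an assumption
 * about how the event list is maintained, not taken from the source. */
static void tracelog_append_instant(const char *name, const char *category)
{
    rb_tracelog_event_t *event = tracelog_alloc_mem(sizeof(rb_tracelog_event_t));
    event->name = name;
    event->category = category;
    event->args = NULL;
    event->phase = 'I';
    event->timestamp = clock_abs_ns();
    event->next = NULL;

    g_tracelog->tail_event->next = event;  /* link after current tail */
    g_tracelog->tail_event = event;
}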
void gridfile_write_buffer( gridfile *gfile, const char *data,
                            gridfs_offset length ) {

    int bytes_left = 0;
    int data_partial_len = 0;
    int chunks_to_write = 0;
    char *buffer;
    bson *oChunk;
    gridfs_offset to_write = length + gfile->pending_len;

    if ( to_write < DEFAULT_CHUNK_SIZE ) { /* Less than one chunk to write */
        if( gfile->pending_data ) {
            gfile->pending_data =
                ( char * )bson_realloc( ( void * )gfile->pending_data,
                                        gfile->pending_len + to_write );
            memcpy( gfile->pending_data + gfile->pending_len, data, length );
        } else if ( to_write > 0 ) {
            gfile->pending_data = ( char * )bson_malloc( to_write );
            memcpy( gfile->pending_data, data, length );
        }
        gfile->pending_len += length;

    } else { /* At least one chunk of data to write */
        chunks_to_write = to_write / DEFAULT_CHUNK_SIZE;
        bytes_left = to_write % DEFAULT_CHUNK_SIZE;

        /* If there's a pending chunk to be written, we need to combine
         * the buffer provided up to DEFAULT_CHUNK_SIZE. */
        if ( gfile->pending_len > 0 ) {
            data_partial_len = DEFAULT_CHUNK_SIZE - gfile->pending_len;
            buffer = ( char * )bson_malloc( DEFAULT_CHUNK_SIZE );
            memcpy( buffer, gfile->pending_data, gfile->pending_len );
            memcpy( buffer + gfile->pending_len, data, data_partial_len );

            oChunk = chunk_new( gfile->id, gfile->chunk_num, buffer, DEFAULT_CHUNK_SIZE );
            mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk );
            chunk_free( oChunk );
            gfile->chunk_num++;
            gfile->length += DEFAULT_CHUNK_SIZE;
            data += data_partial_len;
            chunks_to_write--;

            bson_free( buffer );
        }

        while( chunks_to_write > 0 ) {
            oChunk = chunk_new( gfile->id, gfile->chunk_num, data, DEFAULT_CHUNK_SIZE );
            mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk );
            chunk_free( oChunk );
            gfile->chunk_num++;
            chunks_to_write--;
            gfile->length += DEFAULT_CHUNK_SIZE;
            data += DEFAULT_CHUNK_SIZE;
        }

        bson_free( gfile->pending_data );

        /* If there are any leftover bytes, store them as pending data. */
        if( bytes_left == 0 )
            gfile->pending_data = NULL;
        else {
            gfile->pending_data = ( char * )bson_malloc( bytes_left );
            memcpy( gfile->pending_data, data, bytes_left );
        }
        gfile->pending_len = bytes_left;
    }
}
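/* Streaming-writer sketch for gridfile_write_buffer()/gridfile_writer_done()
 * above. gridfile_writer_init() and its argument order follow the legacy
 * mongo-c-driver and are an assumption here. Writes smaller than
 * DEFAULT_CHUNK_SIZE accumulate in pending_data; completed chunks are flushed
 * as they fill, and gridfile_writer_done() writes the remainder plus the
 * files-collection metadata. */
static bson stream_upload( gridfs *gfs, const char *part1, size_t len1,
                           const char *part2, size_t len2 ) {
    gridfile gfile;
    gridfile_writer_init( &gfile, gfs, "streamed.bin", "application/octet-stream" );
    gridfile_write_buffer( &gfile, part1, len1 ); /* may only buffer */
    gridfile_write_buffer( &gfile, part2, len2 ); /* flushes any completed chunks */
    return gridfile_writer_done( &gfile );        /* remainder + metadata */
}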
Chunk*
get_chunk (void)
{
  if (nb_recyclable_chunks == 0)
    return chunk_new ();
  else
    return chunks[--nb_recyclable_chunks];
}
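/* get_chunk () above is the reuse half of a free list; a matching release
 * function is sketched here. NB_MAX_RECYCLABLE_CHUNKS (the capacity of the
 * chunks array) and chunk_delete () (the destructor paired with chunk_new ())
 * are hypothetical names, not taken from the source. */
void
release_chunk (Chunk *chunk)
{
  if (nb_recyclable_chunks < NB_MAX_RECYCLABLE_CHUNKS)
    chunks[nb_recyclable_chunks++] = chunk;  /* recycle for a later get_chunk () */
  else
    chunk_delete (chunk);                    /* pool full: free it */
}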
/* steal up to length bytes from in and put them into out, return number of bytes stolen */
goffset li_chunkqueue_steal_len(liChunkQueue *out, liChunkQueue *in, goffset length) {
    liChunk *c, *cnew;
    GList* l;
    goffset bytes = 0, meminbytes = 0, memoutbytes = 0;
    goffset we_have;

    while ( (NULL != (c = li_chunkqueue_first_chunk(in))) && length > 0 ) {
        we_have = li_chunk_length(c);
        if (!we_have) { /* remove empty chunks */
            if (c->type == STRING_CHUNK) meminbytes -= c->data.str->len;
            else if (c->type == MEM_CHUNK) meminbytes -= c->mem->len;
            else if (c->type == BUFFER_CHUNK) meminbytes -= c->data.buffer.length;
            chunk_free(in, c);
            continue;
        }
        if (we_have <= length) { /* move complete chunk */
            l = g_queue_pop_head_link(&in->queue);
            g_queue_push_tail_link(&out->queue, l);
            bytes += we_have;
            if (c->type == STRING_CHUNK) {
                meminbytes -= c->data.str->len;
                memoutbytes += c->data.str->len;
            } else if (c->type == MEM_CHUNK) {
                meminbytes -= c->mem->len;
                memoutbytes += c->mem->len;
            } else if (c->type == BUFFER_CHUNK) {
                meminbytes -= c->data.buffer.length;
                memoutbytes += c->data.buffer.length;
            }
            length -= we_have;
        } else { /* copy first part of a chunk */
            cnew = chunk_new();
            switch (c->type) {
            case UNUSED_CHUNK: /* impossible, has length 0 */
                /* remove "empty" chunks */
                chunk_free(in, c);
                chunk_free(NULL, cnew);
                continue;
            case STRING_CHUNK: /* change type to MEM_CHUNK, as we copy it anyway */
                cnew->type = MEM_CHUNK;
                cnew->mem = g_byte_array_sized_new(length);
                g_byte_array_append(cnew->mem, (guint8*) c->data.str->str + c->offset, length);
                memoutbytes += length;
                break;
            case MEM_CHUNK:
                cnew->type = MEM_CHUNK;
                cnew->mem = g_byte_array_sized_new(length);
                g_byte_array_append(cnew->mem, (guint8*) c->mem->data + c->offset, length);
                memoutbytes += length;
                break;
            case FILE_CHUNK:
                cnew->type = FILE_CHUNK;
                li_chunkfile_acquire(c->data.file.file);
                cnew->data.file.file = c->data.file.file;
                cnew->data.file.start = c->data.file.start + c->offset;
                cnew->data.file.length = length;
                break;
            case BUFFER_CHUNK:
                cnew->type = BUFFER_CHUNK;
                li_buffer_acquire(c->data.buffer.buffer);
                cnew->data.buffer.buffer = c->data.buffer.buffer;
                cnew->data.buffer.offset = c->data.buffer.offset + c->offset;
                cnew->data.buffer.length = length;
                memoutbytes += length;
                break;
            }
            c->offset += length;
            bytes += length;
            length = 0;
            g_queue_push_tail_link(&out->queue, &cnew->cq_link);
        }
    }

    in->bytes_out += bytes;
    in->length -= bytes;
    out->bytes_in += bytes;
    out->length += bytes;
    cqlimit_update(out, memoutbytes);
    cqlimit_update(in, meminbytes);
    return bytes;
}
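/* Usage sketch for li_chunkqueue_steal_len() above: drain up to 4 KiB from an
 * input queue into an output queue. Whole chunks move by relinking, file and
 * buffer chunks by reference; memory is copied only when a chunk is split.
 * Queues are assumed constructed as in the earlier chunkqueue sketch. */
static goffset forward_some(liChunkQueue *out_cq, liChunkQueue *in_cq) {
    goffset moved = li_chunkqueue_steal_len(out_cq, in_cq, 4096);
    /* moved <= 4096; in_cq->bytes_out and out_cq->bytes_in advance by moved */
    return moved;
}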