Example #1
void avi_put_astream_format_header(avi_Context* AVI, io_Stream* stream)
{
	int axd_size        = stream->extra_data_size;
	int axd_size_align  = (stream->extra_data_size+1) & ~1;

	int sampsize = avi_audio_sample_size(stream);

	int64_t strf = avi_open_tag(AVI, "strf");// audio stream format
	io_write_wl16(AVI->writer, stream->a_fmt);    // Format (codec) tag
	io_write_wl16(AVI->writer, stream->a_chans);  // Number of channels
	io_write_wl32(AVI->writer, stream->a_rate);   // SamplesPerSec
	io_write_wl32(AVI->writer, stream->mpgrate/8);// Average Bytes per sec
	io_write_wl16(AVI->writer, sampsize/4);       // BlockAlign
	io_write_wl16(AVI->writer, stream->a_bits);   // BitsPerSample
	io_write_wl16(AVI->writer, axd_size);         // size of extra data
	// write extradata (codec private)
	if (axd_size > 0 && stream->extra_data)
	{
		io_write_buf(AVI->writer, stream->extra_data, axd_size);
		if (axd_size != axd_size_align)
		{
			io_write_w8(AVI->writer, 0);  //align
		}
	}
	avi_close_tag(AVI, strf); //write the chunk size
}
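The (extra_data_size + 1) & ~1 expression above rounds the extra-data size up to the next even value, because RIFF chunks are padded to 2-byte (WORD) boundaries; that is why a single zero byte is written when the size is odd. The same idiom as a standalone helper, purely as an illustration (riff_even_size is a hypothetical name, not part of the original source):

static inline int riff_even_size(int n)
{
	/* round n up to the next even value: 5 -> 6, 6 -> 6 */
	return (n + 1) & ~1;
}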
Example #2
File: sha_fs.c  Project: jmscott/blobio
/*
 *  Write a portion of a blob to local storage and derive a partial digest.
 *  Return 1 if the accumulated digest matches the expected digest,
 *  0 if the partial digest does not match.
 */
static int
eat_chunk(struct request *r, blk_SHA_CTX *p_ctx, int fd, unsigned char *buf,
	  int buf_size)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	blk_SHA_CTX ctx;
	unsigned char digest[20];

	/*
	 *  Update the incremental digest.
	 */
	blk_SHA1_Update(p_ctx, buf, buf_size);

	/*
	 *  Write the chunk to the local temp file.
	 */
	if (io_write_buf(fd, buf, buf_size))
		_panic2(r, "eat_chunk: write(tmp) failed", strerror(errno));
	/*
	 *  Determine if we have seen the whole blob
	 *  by copying the incremental digest, finalizing it,
	 *  then comparing to the expected blob.
	 */
	memcpy(&ctx, p_ctx, sizeof *p_ctx);
	blk_SHA1_Final(digest, &ctx);
	return memcmp(sp->digest, digest, 20) == 0 ? 1 : 0;
}
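eat_chunk() is meant to be called repeatedly: the caller owns the incremental blk_SHA_CTX, and each call appends the chunk to the temp file, then finalizes a copy of the context to test whether the whole blob has been seen. A hypothetical driver loop, assuming io_read() behaves as in Example #6 (bytes read on success, 0 at EOF, negative on error); stream_until_match is a made-up name used only for illustration:

static int
stream_until_match(struct request *r, blk_SHA_CTX *p_ctx, int in_fd, int tmp_fd)
{
	unsigned char buf[4096];
	int nread;

	while ((nread = io_read(in_fd, buf, sizeof buf)) > 0)
		if (eat_chunk(r, p_ctx, tmp_fd, buf, nread))
			return 1;	/* accumulated digest matches: blob complete */
	return 0;			/* EOF or error before the digest matched */
}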
Example #3
void avi_put_vstream_format_header(avi_Context* AVI, io_Stream* stream)
{
	int vxd_size        = stream->extra_data_size;
	int vxd_size_align  = (stream->extra_data_size+1) & ~1;

	int64_t strf = avi_open_tag(AVI, "strf");   // stream format header
	io_write_wl32(AVI->writer, 40 + vxd_size);  // struct size: 40-byte header + extra data
	io_write_wl32(AVI->writer, stream->width);  // Width
	io_write_wl32(AVI->writer, stream->height); // Height
	io_write_wl16(AVI->writer, 1);              // Planes
	io_write_wl16(AVI->writer, 24);             // BitCount - bits per pixel: 1, 4, 8, 24 or 32
	if(strncmp(stream->compressor,"DIB",3)==0)
		io_write_wl32(AVI->writer, 0);          // Compression
	else
		io_write_4cc(AVI->writer, stream->compressor);
	io_write_wl32(AVI->writer, stream->width*stream->height*3);// image size (in bytes?)
	io_write_wl32(AVI->writer, 0);              // XPelsPerMeter
	io_write_wl32(AVI->writer, 0);              // YPelsPerMeter
	io_write_wl32(AVI->writer, 0);              // ClrUsed: Number of colors used
	io_write_wl32(AVI->writer, 0);              // ClrImportant: Number of colors important
	// write extradata (codec private)
	if (vxd_size > 0 && stream->extra_data)
	{
		io_write_buf(AVI->writer, stream->extra_data, vxd_size);
		if (vxd_size != vxd_size_align)
		{
			io_write_w8(AVI->writer, 0);  //align
		}
	}
	avi_close_tag(AVI, strf); //write the chunk size
}
Example #4
/* Insert a new workticket into the work list for consideration by the
   I/O loop.
 */
void wt_insert(WorkTicket *wt)
{
        g_assert(wt);

        if(int_option(kOption_verbose) & VERBOSE_FLOW)
                g_message("Inserting work ticket 0x%lX", (unsigned long)wt);
        if(wt_queue_full())
                g_warning("Inserting work ticket even though the work list is full.");
        work_list = g_slist_append(work_list, wt);
        /* We need to reference the write buffer just to make sure
           it's registered, otherwise the main event loop might not
           consider it for output. */
        (void)io_write_buf();
        return;
}
Example #5
/* When we think we're done, it's nice to check to see that we are.
   If something's wrong, we should at least report it.  We don't
   actually do more than emit a warning of incomplete work.
 */
void wt_check_for_orphan_work()
{
        IOSBuf *ios;
        int num;
        int cmd;
        
        ios = io_write_buf();
        if(ios && (num = ios_buffer_size(ios))) {
                g_warning("%d bytes remain in write queue", num);
                cmd = (int)(*(char *)ios_at(ios, 0));
                proto_print_command(cmd);
        }
        if((num = g_slist_length(work_list)))
                g_warning("%d items remain in the work list", num);
        return;
}
Example #6
File: sha_fs.c  Project: jmscott/blobio
/*
 *  Digest a local blob stream and store the digested blob.
 */
static int
sha_fs_digest(struct request *r, int fd, char *hex_digest, int do_put)
{
	char unsigned buf[4096], digest[20], *d, *d_end;
	char *h;
	blk_SHA_CTX ctx;
	int nread;
	int tmp_fd = -1;
	char tmp_path[MAX_FILE_PATH_LEN];
	int status = 0;

	tmp_path[0] = 0;
	if (do_put) {
		static int drift = 0;

		if (drift++ >= 999)
			drift = 0;

		/*
		 *  Open a temporary file in $sha_fs_root/tmp to accumulate the
		 *  blob read from the stream.  The file looks like
		 *
		 *	digest-time-pid-drift
		 */
		snprintf(tmp_path, sizeof tmp_path, "%s/digest-%d-%u-%d",
						boot_data.tmp_dir_path,
						/*
						 *  Warning:
						 *	Casting time() to int is
						 *	incorrect!!
						 */
						(int)time((time_t *)0),
						getpid(), drift);
		/*
		 *  Open the file ... need O_LARGEFILE support!!
		 *  Need to catch EINTR!!!!
		 */
		tmp_fd = io_open(tmp_path, O_CREAT|O_EXCL|O_WRONLY|O_APPEND,
								S_IRUSR);
		if (tmp_fd < 0)
			_panic3(r, "digest: open(tmp) failed", tmp_path,
							strerror(errno));
	}
	blk_SHA1_Init(&ctx);
	while ((nread = io_read(fd, buf, sizeof buf)) > 0) {
		blk_SHA1_Update(&ctx, buf, nread);
		if (do_put && io_write_buf(tmp_fd, buf, nread) != 0)
			_panic2(r, "digest: write_buf(tmp) failed",
						strerror(errno));
	}
	if (nread < 0) {
		_error(r, "digest: _read() failed");
		goto croak;
	}
	blk_SHA1_Final(digest, &ctx);

	if (do_put) {
		status = io_close(tmp_fd);
		tmp_fd = -1;
		if (status)
			_panic2(r,"digest: close(tmp) failed",strerror(errno));
	}

	/*
	 *  Convert the binary sha digest to text.
	 */
	h = hex_digest;
	d = digest;
	d_end = d + 20;
	while (d < d_end) {
		*h++ = nib2hex[(*d & 0xf0) >> 4];
		*h++ = nib2hex[*d & 0xf];
		d++;
	}
	*h = 0;

	/*
	 *  Move the blob from the temporary file to the blob file.
	 */
	if (do_put) {
		blob_path(r, hex_digest);
		arbor_rename(tmp_path,
			((struct sha_fs_request *)r->open_data)->blob_path);
		tmp_path[0] = 0;
	}

	goto cleanup;
croak:
	status = -1;
cleanup:
	if (tmp_fd > -1 && io_close(tmp_fd))
		_panic2(r, "digest: close(tmp) failed", strerror(errno));
	if (tmp_path[0] && io_unlink(tmp_path))
		_panic3(r, "digest: unlink(tmp) failed", tmp_path,
						strerror(errno));
	return status;
}
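The hex conversion near the end indexes a nib2hex table that is not shown in this excerpt. A plausible definition, stated here as an assumption rather than taken from the source, is simply the sixteen lowercase hex digits:

/* assumption: lowercase hex-digit lookup table used by the nibble-to-hex loop */
static const char nib2hex[] = "0123456789abcdef";

/* e.g. digest byte 0xa3 -> nib2hex[0xa] = 'a', nib2hex[0x3] = '3' */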
Example #7
File: sha_fs.c  Project: jmscott/blobio
/*
 *  Copy a local blob to a local stream.
 *
 *  Return 0 if the stream matches the signature, 1 if the blob does not exist,
 *  -1 otherwise.
 *  Note: this needs to be folded into sha_fs_get().
 */
static int
sha_fs_copy(struct request *r, int out_fd)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	int status = 0;
	blk_SHA_CTX ctx;
	unsigned char digest[20];
	int fd;
	unsigned char chunk[CHUNK_SIZE];
	int nread;
	static char n[] = "sha_fs_write";

	blob_path(r, r->digest);

	/*
	 *  Open the file to the blob.
	 */
	switch (_open(r, sp->blob_path, &fd)) {
	case 0:
		break;
	case ENOENT:
		_warn3(r, n, "open(blob): not found", r->digest);
		return 1;
	default:
		_panic2(r, n, "_open(blob) failed");
	}

	blk_SHA1_Init(&ctx);

	/*
	 *  Read a chunk from the file, write chunk to local stream,
	 *  update incremental digest.
	 */
	while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) {
		if (io_write_buf(out_fd, chunk, nread)) {
			_error2(r, n, "write_buf() failed");
			goto croak;
		}
			
		/*
		 *  Update the incremental digest.
		 */
		blk_SHA1_Update(&ctx, chunk, nread);
	}
	if (nread < 0)
		_panic2(r, n, "_read(blob) failed");
	/*
	 *  Finalize the digest.
	 */
	blk_SHA1_Final(digest, &ctx);

	/*
	 *  If the calculated digest does NOT match the stored digest,
	 *  then zap the blob from storage and get panicy.
	 *  A corrupt blob is a bad, bad thang.
	 */
	if (memcmp(sp->digest, digest, 20))
		_panic3(r, n, "stored blob doesn't match digest", r->digest);
	goto cleanup;
croak:
	status = -1;
cleanup:
	if (_close(r, &fd))
		_panic2(r, n, "_close(blob) failed");
	return status;
}
Example #8
int avi_write_packet(avi_Context* AVI, int stream_index, BYTE *data, uint32_t size, int64_t dts, int block_align, int32_t flags)
{
    char tag[5];
    unsigned int i_flags=0;

    io_Stream *stream= get_stream(AVI->stream_list, stream_index);

    avi_RIFF* riff = avi_get_last_riff(AVI);
    // for VBR streams (block_align == 0), keep the index in sync with dts
    // by padding with empty packets
    while(block_align==0 && dts != AV_NOPTS_VALUE && dts > stream->packet_count)
        avi_write_packet(AVI, stream_index, NULL, 0, AV_NOPTS_VALUE, 0, 0);

    stream->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (io_get_offset(AVI->writer) - riff->riff_start > AVI_MAX_RIFF_SIZE)
    {
        avi_write_ix(AVI);
        avi_close_tag(AVI, riff->movi_list);

        if (riff->id == 1)
            avi_write_idx1(AVI, riff);

        avi_close_tag(AVI, riff->riff_start);

        avi_add_new_riff(AVI);
        
        riff = avi_get_last_riff(AVI); //update riff
    }

    avi_stream2fourcc(tag, stream);

    if(flags & AV_PKT_FLAG_KEY) //key frame
        i_flags = 0x10;

    if (stream->type == STREAM_TYPE_AUDIO)
       stream->audio_strm_length += size;


    avi_Index* idx = (avi_Index*) stream->indexes;
    int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
    int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
    if (idx->ents_allocated <= idx->entry)
    {
        idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
        if (!idx->cluster)
            return -1;
        idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(avi_Ientry));
        if (!idx->cluster[cl])
            return -1;
        idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
    }

    idx->cluster[cl][id].flags = i_flags;
    idx->cluster[cl][id].pos = io_get_offset(AVI->writer) - riff->movi_list;
    idx->cluster[cl][id].len = size;
    idx->entry++;


    io_write_4cc(AVI->writer, tag);
    io_write_wl32(AVI->writer, size);
    io_write_buf(AVI->writer, data, size);
    if (size & 1)
        io_write_w8(AVI->writer, 0);

    io_flush_buffer(AVI->writer);

    return 0;
}
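A hypothetical call for one encoded video frame, showing how the caller's key-frame flag maps to the 0x10 index flag written above; every name except avi_write_packet and AV_PKT_FLAG_KEY is a placeholder:

/* illustration only: video_index, frame_data, frame_size, frame_dts and
   is_keyframe are placeholder names */
if (avi_write_packet(AVI, video_index, frame_data, frame_size,
                     frame_dts, 0 /* block_align */,
                     is_keyframe ? AV_PKT_FLAG_KEY : 0) < 0)
    fprintf(stderr, "avi_write_packet: index allocation failed\n");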