Example #1
static
void custom_unix_test(void)
{
    void *array;
    bool ok;

    /* "linux" is replaced by "1" in .template so we use "Linux" */
    dr_fprintf(STDERR, "  testing custom Linux alloc....");

    array = dr_raw_mem_alloc(PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE, NULL);
    if (array == NULL)
        dr_fprintf(STDERR, "error: unable to mmap\n");
    write_array(array);

# ifdef LINUX
    array = dr_raw_mremap(array, PAGE_SIZE, PAGE_SIZE*2, MREMAP_MAYMOVE, NULL);
    if ((ptr_int_t)array <= 0 && (ptr_int_t)array >= -PAGE_SIZE)
        dr_fprintf(STDERR, "error: unable to mremap\n");
    write_array(array);
# endif

    ok = dr_raw_mem_free(array, PAGE_SIZE*2);
    if (!ok)
        dr_fprintf(STDERR, "error: failed to munmap\n");

# ifdef LINUX
    array = dr_raw_brk(0);
    if (array == NULL)
        dr_fprintf(STDERR, "error: unable to query brk\n");
# endif

    dr_fprintf(STDERR, "success\n");
}
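
Note: write_array() itself is not shown in these snippets. In the DynamoRIO allocation tests it presumably just stores to every element of the freshly allocated buffer to prove the region is writable; a minimal stand-in under that assumption (SIZE is a placeholder for whatever the test defines) could be:

/* Hypothetical stand-in for the write_array() helper used by these
 * allocation tests: touch every byte so an unwritable page faults.
 * SIZE is a placeholder; the real tests define their own size.
 */
#define SIZE 1024

static void
write_array(char *array)
{
    int i;
    for (i = 0; i < SIZE; i++)
        array[i] = (char)i;
}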
Example #2
File: StyleTask.C Project: juddy/edcde
static void
autonumber(FlexBuffer *buffer, const Token& t)
{
  const AttributeRec *id = t.LookupAttr(SGMLName::intern("ID", 1));
  const AttributeRec *type = t.LookupAttr(SGMLName::intern("Type", 1));
  const AttributeRec *initial = t.LookupAttr(SGMLName::intern("Initial", 1));
  const AttributeRec *delta = t.LookupAttr(SGMLName::intern("Delta", 1));
  const AttributeRec *reset = t.LookupAttr(SGMLName::intern("Reset", 1));
  const AttributeRec *counter = t.LookupAttr(SGMLName::intern("Counter", 1));

  if(!id) throw(Unexpected("Autonumber: missing ID attribute"));
  if(!type) throw(Unexpected("Autonumber: missing Type attribute"));
  if(!initial) throw(Unexpected("Autonumber: missing Initial attribute"));
  if(!delta) throw(Unexpected("Autonumber: missing Delta attribute"));
  if(!reset) throw(Unexpected("Autonumber: missing Reset attribute"));
  if(!counter) throw(Unexpected("Autonumber: missing Counter attribute"));

  
  buffer->writeStr(id->getAttrValueString());
  buffer->writeStr(" = autonumber[\"");
  buffer->writeStr(type->getAttrValueString());
  buffer->writeStr("\", \"");
  buffer->writeStr(initial->getAttrValueString());
  buffer->writeStr("\", \"");
  buffer->writeStr(delta->getAttrValueString());
  buffer->writeStr("\", ");

  write_array(buffer,  counter->getAttrValueString(), 1);

  buffer->writeStr(", ");

  write_array(buffer, reset->getAttrValueString(), 1);

  buffer->writeStr("]\n\n");
}
Example #3
void Wf_return::write(string & indent, ostream & os) {
  //  int is_complex;
  //  Array2 <doublevar> amp;//!< ln( |psi| ), grad ln( |psi| ), grad^2 |psi|/|psi|
  //  Array2 <doublevar> phase; //!< phase and derivatives
  //  Array2 <dcomplex> cvals; //!< (null), grad ln(|psi|), grad^2 psi/psi  for
  os << indent << "nfunc " << amp.GetDim(0) << " nst " << amp.GetDim(1) << endl;
  os << indent << "is_complex " << is_complex << endl;
  os << indent << "amp "; write_array(os, amp) ;
  os << indent << "phase "; write_array(os, phase) ;
  os << indent << "cvals "; write_array(os, cvals) ;
  
  
  
}
Example #4
File: rwarray.c Project: LediKorsar/gawk
static int
write_value(int fd, NODE *val)
{
	int code, len;

	if (val->type == Node_var_array) {
		code = htonl(2);
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return -1;
		return write_array(fd, val);
	}

	if ((val->flags & NUMBER) != 0) {
		code = htonl(1);
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return -1;

		if (write(fd, & val->numbr, sizeof(val->numbr)) != sizeof(val->numbr))
			return -1;
	} else {
		code = 0;
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return -1;

		len = htonl(val->stlen);
		if (write(fd, & len, sizeof(len)) != sizeof(len))
			return -1;

		if (write(fd, val->stptr, val->stlen) != val->stlen)
			return -1;
	}

	return 0;
}
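
For reference, the record layout written above is a 32-bit big-endian type code (2 = array, 1 = number, 0 = string), with strings followed by a 32-bit big-endian length and the raw bytes. A minimal standalone reader for just the string case (independent of gawk's NODE internals, so the names here are illustrative only) might look like:

#include <arpa/inet.h>  /* ntohl */
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

/* Hypothetical counterpart to write_value(): read the type code and,
 * for a string record, the length-prefixed bytes. Returns a malloc'd
 * NUL-terminated buffer (caller frees) or NULL on error / non-string
 * records. */
static char *
read_string_value(int fd, uint32_t *len_out)
{
	uint32_t code, len;
	char *buf;

	if (read(fd, & code, sizeof(code)) != sizeof(code))
		return NULL;
	if (ntohl(code) != 0)	/* 0 marks a string record */
		return NULL;
	if (read(fd, & len, sizeof(len)) != sizeof(len))
		return NULL;
	len = ntohl(len);
	if ((buf = malloc(len + 1)) == NULL)
		return NULL;
	if (read(fd, buf, len) != (ssize_t) len) {
		free(buf);
		return NULL;
	}
	buf[len] = '\0';
	*len_out = len;
	return buf;
}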
Example #5
File: rwarray0.c Project: Distrotech/gawk
static awk_value_t *
do_writea(int nargs, awk_value_t *result)
{
	awk_value_t filename, array;
	int fd = -1;
	uint32_t major = MAJOR;
	uint32_t minor = MINOR;

	assert(result != NULL);
	make_number(0.0, result);

	if (do_lint && nargs > 2)
		lintwarn(ext_id, _("writea: called with too many arguments"));

	if (nargs < 2)
		goto out;

	/* filename is first arg, array to dump is second */
	if (! get_argument(0, AWK_STRING, & filename)) {
		fprintf(stderr, _("do_writea: argument 0 is not a string\n"));
		errno = EINVAL;
		goto done1;
	}

	if (! get_argument(1, AWK_ARRAY, & array)) {
		fprintf(stderr, _("do_writea: argument 1 is not an array\n"));
		errno = EINVAL;
		goto done1;
	}

	/* open the file, if error, set ERRNO and return */
	fd = creat(filename.str_value.str, 0600);
	if (fd < 0)
		goto done1;

	if (write(fd, MAGIC, strlen(MAGIC)) != strlen(MAGIC))
		goto done1;

	major = htonl(major);
	if (write(fd, & major, sizeof(major)) != sizeof(major))
		goto done1;

	minor = htonl(minor);
	if (write(fd, & minor, sizeof(minor)) != sizeof(minor))
		goto done1;

	if (write_array(fd, array.array_cookie)) {
		make_number(1.0, result);
		goto done0;
	}

done1:
	update_ERRNO_int(errno);
	unlink(filename.str_value.str);

done0:
	close(fd);
out:
	return result;
}
Example #6
void JsonValue::write(std::string &json) const
{
	switch (type)
	{
	case Type::null:
		json += "null";
		break;
	case Type::object:
		write_object(json);
		break;
	case Type::array:
		write_array(json);
		break;
	case Type::string:
		write_string(value_string, json);
		break;
	case Type::number:
		write_number(json);
		break;
	case Type::boolean:
		json += value_boolean ? "true" : "false";
		break;
	case Type::undefined:
		break;
	}
}
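
write_string(value_string, json) is not shown here; presumably it appends the text as a quoted JSON string with the usual escaping. A standalone sketch of that kind of escaping (not this project's actual implementation) for illustration:

#include <stdio.h>

/* Hypothetical helper: print a JSON-escaped string literal, including the
 * surrounding quotes. Control characters use the \u00XX form. */
static void print_json_string(const char *s)
{
	putchar('"');
	for (; *s != '\0'; s++) {
		unsigned char c = (unsigned char)*s;
		switch (c) {
		case '"':  fputs("\\\"", stdout); break;
		case '\\': fputs("\\\\", stdout); break;
		case '\b': fputs("\\b", stdout);  break;
		case '\f': fputs("\\f", stdout);  break;
		case '\n': fputs("\\n", stdout);  break;
		case '\r': fputs("\\r", stdout);  break;
		case '\t': fputs("\\t", stdout);  break;
		default:
			if (c < 0x20)
				printf("\\u%04x", (unsigned int)c);
			else
				putchar(c);
		}
	}
	putchar('"');
}

int main(void)
{
	print_json_string("line1\nline2 \"quoted\"");
	putchar('\n');
	return 0;
}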
Example #7
static void write_value(io::stream& e, const void* object, const bsreq* req, int level) {
	if(req->type == number_type) {
		auto value = req->get(object);
		e << value;
	} else if(req->type == text_type) {
		auto value = (const char*)req->get(object);
		e << "\"" << value << "\"";
	} else if(req->reference) {
		auto value = (const void*)req->get(object);
		write_key(e, value, req->type);
	} else if(req->isenum) {
		auto value = req->get(object);
		auto pd = bsdata::find(req->type);
		if(pd)
			write_key(e, pd->get(value), req->type);
		else
			e << value;
	} else {
		if(level > 0)
			e << "(";
		auto count = 0;
		for(auto f = req->type; *f; f++) {
			if(count)
				e << ", ";
			write_array(e, object, f, level + 1);
			count++;
		}
		if(level > 0)
			e << ")";
	}
}
Example #8
File: rwarray0.c Project: Distrotech/gawk
static awk_bool_t
write_value(int fd, awk_value_t *val)
{
	uint32_t code, len;

	if (val->val_type == AWK_ARRAY) {
		code = htonl(2);
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return awk_false;
		return write_array(fd, val->array_cookie);
	}

	if (val->val_type == AWK_NUMBER) {
		code = htonl(1);
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return awk_false;

		if (write(fd, & val->num_value, sizeof(val->num_value)) != sizeof(val->num_value))
			return awk_false;
	} else {
		code = 0;
		if (write(fd, & code, sizeof(code)) != sizeof(code))
			return awk_false;

		len = htonl(val->str_value.len);
		if (write(fd, & len, sizeof(len)) != sizeof(len))
			return awk_false;

		if (write(fd, val->str_value.str, val->str_value.len)
				!= (ssize_t) val->str_value.len)
			return awk_false;
	}

	return awk_true;
}
Example #9
static size_t
write_compact_array(json_t* array, stream_t* stream)
{
    size_t bytes = 0;

    assert(json_is_array(array));
    json_t* header = build_compact_array_header(array);

    uint8_t tag = BSER_TAG_COMPACT_ARRAY;
    if (stream->write(stream, &tag, SIZE_U8) == SIZE_U8) {
        size_t header_bytes = write_array(header, stream);
        if (header_bytes > 0) {
            size_t array_length = json_array_size(array);

            json_t* array_length_node = json_integer(array_length);
            size_t integer_size = write_integer(array_length_node, stream);
            if (integer_size > 0) {
                size_t written = write_compact_objects(array, header, stream);
                if (written > 0) {
                    bytes = SIZE_U8 + header_bytes + integer_size + written;
                }
            }
            json_decref(array_length_node);
        }
    }
    json_decref(header);
    return bytes;
}
Example #10
char *save_gene_for_phase_and_front(int pi /* population index */, const char *save_prefix, 
                                     int phase, int front_index)
{
  static char gene_save_name[255];
  double *gene = genes[pi];
  double *fitness = fitness_matrix + FITNESS_INDEX(pi);
  sprintf(gene_save_name, "p%d-f%dgene.bin", phase, front_index);
  add_to_script(gene_save_name, phase, fitness);
  sprintf(gene_save_name, "%s/p%d-f%dgene.bin", save_prefix, phase, front_index);
  mprintf(1, "individOnFront[{phase -> %d, frontIndex -> %d}] -> \n", phase, front_index + 1);

  assert(FITNESS_COUNT == 2);
  mprintf(1, "\t{fitness -> {%lf, %lf}, age -> %d, layer -> %d, meetsGoal -> %s, geneFilename -> \"%s\"",
          fitness[0], fitness[1], ages[pi], LAYER_OF_INDIV(pi), is_goal_fitness(fitness) ? "True" : "False",
          gene_save_name);

#ifdef PRINT_GENE_CHAR
  mprintf(1, ", gene -> { %lf" , gene[0]);
  int i;
  for (i = 1; i < GENE_COUNT; i++) 
    mprintf(0, ", %lf", gene[i]);
  mprintf(0, "} ");
#endif
  mprintf(0, "}\n");

  write_array(gene_save_name, GENE_COUNT, gene);
  return gene_save_name;
}
Example #11
pfs::error_code ubjson_ostream<OStreamType, JsonType>::write_json (json_type const & j, bool with_prefix)
{
    switch (j.type()) {
    case data_type::null:
        _os << UBJSON_CHAR_NULL;
        break;

    case data_type::boolean:
        _os << (j.boolean_data()
                ? UBJSON_CHAR_TRUE
                : UBJSON_CHAR_FALSE);
        break;

    case data_type::integer:
        return write_integer(j.integer_data(), with_prefix);

    case data_type::real:
        return write_real(j.real_data(), with_prefix);

    case data_type::string:
        return write_string(j.string_data(), with_prefix);

    case data_type::array:
        return write_array(j);

    case data_type::object:
        return write_object(j);
    }

    return pfs::error_code();
}
Example #12
 BinaryOutputStream& BinaryOutputStream::write_record(
     Numeric::float64* data, int nbr
 ) {
     begin_record();
     write_array(data, nbr);
     end_record();
     return *this;
 }
Example #13
File: schema.c Project: 7472741/impala
int avro_schema_to_json(const avro_schema_t schema, avro_writer_t out)
{
	check_param(EINVAL, is_avro_schema(schema), "schema");
	check_param(EINVAL, out, "writer");

	int rval;

	if (is_avro_primitive(schema)) {
		check(rval, avro_write_str(out, "{\"type\":\""));
	}

	switch (avro_typeof(schema)) {
	case AVRO_STRING:
		check(rval, avro_write_str(out, "string"));
		break;
	case AVRO_BYTES:
		check(rval, avro_write_str(out, "bytes"));
		break;
	case AVRO_INT32:
		check(rval, avro_write_str(out, "int"));
		break;
	case AVRO_INT64:
		check(rval, avro_write_str(out, "long"));
		break;
	case AVRO_FLOAT:
		check(rval, avro_write_str(out, "float"));
		break;
	case AVRO_DOUBLE:
		check(rval, avro_write_str(out, "double"));
		break;
	case AVRO_BOOLEAN:
		check(rval, avro_write_str(out, "boolean"));
		break;
	case AVRO_NULL:
		check(rval, avro_write_str(out, "null"));
		break;
	case AVRO_RECORD:
		return write_record(out, avro_schema_to_record(schema));
	case AVRO_ENUM:
		return write_enum(out, avro_schema_to_enum(schema));
	case AVRO_FIXED:
		return write_fixed(out, avro_schema_to_fixed(schema));
	case AVRO_MAP:
		return write_map(out, avro_schema_to_map(schema));
	case AVRO_ARRAY:
		return write_array(out, avro_schema_to_array(schema));
	case AVRO_UNION:
		return write_union(out, avro_schema_to_union(schema));
	case AVRO_LINK:
		return write_link(out, avro_schema_to_link(schema));
	}

	if (is_avro_primitive(schema)) {
		return avro_write_str(out, "\"}");
	}
	avro_set_error("Unknown schema type");
	return EINVAL;
}
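
A hypothetical usage sketch, assuming the stock avro-c helpers avro_schema_string(), avro_writer_memory(), avro_writer_tell(), avro_writer_free() and avro_schema_decref(): render a primitive schema as JSON into a memory buffer.

#include <avro.h>
#include <stdio.h>

int main(void)
{
	char buf[256];
	/* primitive "string" schema and an in-memory writer (assumed avro-c API) */
	avro_schema_t schema = avro_schema_string();
	avro_writer_t out = avro_writer_memory(buf, sizeof(buf));

	if (avro_schema_to_json(schema, out) == 0)
		printf("%.*s\n", (int) avro_writer_tell(out), buf);

	avro_writer_free(out);
	avro_schema_decref(schema);
	return 0;
}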
Example #14
int
main(int argc, char **argv)
{
	if (argc < 3) {
		printf("usage: jpg2hdr outfile.hdr infile.jpg\n");
		return 1;
	}

	write_array(argv[1], argv[2]);
	return 0;
}
Example #15
File: a2.cpp Project: asrathor/CSE-250
int main(int argc, char* argv[]) {
    A2 A = create_array();
    write_array(A);

    std::cout << ((read_array(A) == 1) ? "ok" : "error") << std::endl;
    std::cout << ((A.rsum(N - 1) == 1) ? "ok" : "error") << std::endl;
    std::cout << ((A.csum(M - 1) == 1) ? "ok" : "error") << std::endl;

    return 0;
} // main
Example #16
template <typename T>
void write_device_array(const char *filename, T *data, int n) {
    T *host_array = new T[n];
    if(cudaMemcpy(host_array, data, n*sizeof(T), cudaMemcpyDeviceToHost)
            != cudaSuccess) {
        BOOST_LOG_TRIVIAL(error) << "Error copying data from device to host.";
        exit(4);
    }
    write_array(filename, host_array, n);
    delete []host_array;
}
Example #17
File: rwarray.c Project: LediKorsar/gawk
static NODE *
do_writea(int nargs)
{
	NODE *file, *array;
	int ret;
	int fd;
	uint32_t major = MAJOR;
	uint32_t minor = MINOR;

	if (do_lint && get_curfunc_arg_count() > 2)
		lintwarn("writea: called with too many arguments");

	/* filename is first arg, array to dump is second */
	file = get_scalar_argument(0, FALSE);
	array = get_array_argument(1, FALSE);

	/* open the file, if error, set ERRNO and return */
	(void) force_string(file);
	fd = creat(file->stptr, 0600);
	if (fd < 0) {
		goto done1;
	}

	if (write(fd, MAGIC, strlen(MAGIC)) != strlen(MAGIC))
		goto done1;

	major = htonl(major);
	if (write(fd, & major, sizeof(major)) != sizeof(major))
		goto done1;

	minor = htonl(minor);
	if (write(fd, & minor, sizeof(minor)) != sizeof(minor))
		goto done1;

	ret = write_array(fd, array);
	if (ret != 0)
		goto done1;
	ret = 0;
	goto done0;

done1:
	ret = -1;
	update_ERRNO();
	unlink(file->stptr);

done0:
	close(fd);

	/* Set the return value */
	return make_number((AWKNUM) ret);
}
Example #18
/* WARNING i#262: if you use the cmake binary package, ctest is built
 * without a GNU_STACK section, which causes the linux kernel to set
 * the READ_IMPLIES_EXEC personality flag, which is propagated to
 * children and causes all mmaps to be +x, breaking all these tests
 * that check for mmapped memory to be +rw or +r!
 */
static
void global_test(void)
{
    char *array;
    uint prot;
    dr_fprintf(STDERR, "  testing global memory alloc...");
    array = dr_global_alloc(SIZE);
    write_array(array);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot(DR_MEMPROT_READ|DR_MEMPROT_WRITE))
        dr_fprintf(STDERR, "[error: prot %d doesn't match rw] ", prot);
    dr_global_free(array, SIZE);
    dr_fprintf(STDERR, "success\n");
}
Example #19
static size_t
write_json(json_t* json, stream_t* stream)
{
    switch (json_typeof(json)) {
        case JSON_OBJECT:  return write_object(json, stream);
        case JSON_ARRAY:   return write_array(json, stream);
        case JSON_STRING:  return write_string(json, stream);
        case JSON_INTEGER: return write_integer(json, stream);
        case JSON_REAL:    return write_real(json, stream);
        case JSON_TRUE:    return write_true(json, stream);
        case JSON_FALSE:   return write_false(json, stream);
        case JSON_NULL:    return write_null(json, stream);
        default:           return 0;
    }
}
Example #20
void write_fields(io::stream& e, const void* object, const bsreq* req, const bsreq* skip = 0) {
	auto count = 0;
	for(auto f = req; *f; f++) {
		if(skip && skip == f)
			continue;
		if(isempthy(object, f))
			continue;
		if(count > 0)
			e << " ";
		e << f->id;
		e << "(";
		write_array(e, object, f, 0);
		e << ")";
		count++;
	}
	e << "\r\n";
}
Example #21
File: StyleTask.C Project: juddy/edcde
static void
write_attr(FlexBuffer *f_buffer, const AttributeRec *arec)
{
  f_buffer->writeStr( SGMLName::lookup( arec->getAttrName() ) );
  f_buffer->writeStr( ":\t" );

  const char *val = arec->getAttrValueString();

  /* NAMES, NUMBERS convert to arrays for stylesheet lang. */
  if(arec->getAttrType() == SGMLName::TOKEN
     && strchr(val, ' ')){
    write_array(f_buffer, val, 0);
  }else{
    /* NASTY HACK!
     * The stylesheet internal language requires some feature
     * values to be quoted, and some not. It _seems_ that
     * quoting anything that doesn't start with a digit will
     * satisfy the constraints. This is highly artificial!
     *
     * Now for the exception that proves the hack!
     * TRUE and FALSE...
     *
     * One more to add to the heap, 
     * if an attribute value is being referenced
     */

    int quotes = !isdigit(val[0])
      && strcmp(val, "TRUE") != 0
      && strcmp(val, "FALSE") != 0
      && val[0] != '@';
  
    if(quotes) f_buffer->writeStr( "\"" );
    f_buffer->writeStr( val );
    if(quotes) f_buffer->writeStr( "\"" );
  }
}
Example #22
static
void nonheap_test(void)
{
    uint prot;
    char *array =
        dr_nonheap_alloc(SIZE, DR_MEMPROT_READ|DR_MEMPROT_WRITE|DR_MEMPROT_EXEC);
    dr_fprintf(STDERR, "  testing nonheap memory alloc...");
    write_array(array);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot((DR_MEMPROT_READ|DR_MEMPROT_WRITE|DR_MEMPROT_EXEC)))
        dr_fprintf(STDERR, "[error: prot %d doesn't match rwx] ", prot);
    dr_memory_protect(array, SIZE, DR_MEMPROT_NONE);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot(DR_MEMPROT_NONE))
        dr_fprintf(STDERR, "[error: prot %d doesn't match none] ", prot);
    dr_memory_protect(array, SIZE, DR_MEMPROT_READ);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot(DR_MEMPROT_READ))
        dr_fprintf(STDERR, "[error: prot %d doesn't match r] ", prot);
    if (dr_safe_write(array, 1, (const void *) &prot, NULL))
        dr_fprintf(STDERR, "[error: should not be writable] ");
    dr_nonheap_free(array, SIZE);
    dr_fprintf(STDERR, "success\n");
}
Example #23
static
void raw_alloc_test(void)
{
    uint prot;
    char *array = PREFERRED_ADDR;
    dr_mem_info_t info;
    bool res;
    dr_fprintf(STDERR, "  testing raw memory alloc...");
    res = dr_raw_mem_alloc(PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE,
                           array) != NULL;
    if (!res) {
        dr_fprintf(STDERR, "[error: fail to alloc at "PFX"]\n", array);
        return;
    }
    write_array(array);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot(DR_MEMPROT_READ|DR_MEMPROT_WRITE))
        dr_fprintf(STDERR, "[error: prot %d doesn't match rw]\n", prot);
    dr_raw_mem_free(array, PAGE_SIZE);
    dr_query_memory_ex((const byte *)array, &info);
    if (info.prot != DR_MEMPROT_NONE)
        dr_fprintf(STDERR, "[error: prot %d doesn't match none]\n", info.prot);
    dr_fprintf(STDERR, "success\n");
}
Example #24
static
void raw_alloc_test(void)
{
    uint prot;
    char *array, *preferred;
    dr_mem_info_t info;
    bool res;
    dr_fprintf(STDERR, "  testing raw memory alloc...");

    /* Find a free region of memory without inadvertently "preloading" it.
     * First probe by allocating 2x the platform allocation alignment unit.
     */
    array = dr_raw_mem_alloc(HINT_ALLOC_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE, NULL);
    /* Then select the second half as the preferred address for the allocation test. */
    preferred = (void *)((ptr_uint_t)array + HINT_OFFSET);
    /* Free the probe allocation. */
    dr_raw_mem_free(array, HINT_ALLOC_SIZE);
    array = preferred;

    /* Now `array` is guaranteed to be available. */
    res = dr_raw_mem_alloc(PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE,
                           array) != NULL;
    if (!res) {
        dr_fprintf(STDERR, "[error: fail to alloc at "PFX"]\n", array);
        return;
    }
    write_array(array);
    dr_query_memory((const byte *)array, NULL, NULL, &prot);
    if (prot != get_os_mem_prot(DR_MEMPROT_READ|DR_MEMPROT_WRITE))
        dr_fprintf(STDERR, "[error: prot %d doesn't match rw]\n", prot);
    dr_raw_mem_free(array, PAGE_SIZE);
    dr_query_memory_ex((const byte *)array, &info);
    if (info.prot != DR_MEMPROT_NONE)
        dr_fprintf(STDERR, "[error: prot %d doesn't match none]\n", info.prot);
    dr_fprintf(STDERR, "success\n");
}
Example #25
static
void custom_windows_test(void)
{
    void *array;
    MEMORY_BASIC_INFORMATION mbi;
    bool ok;

    dr_fprintf(STDERR, "  testing custom windows alloc....");

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_NON_DR |
                            DR_ALLOC_RESERVE_ONLY, PAGE_SIZE*2,
                            DR_MEMPROT_NONE, NULL);
    if (array == NULL)
        dr_fprintf(STDERR, "error: unable to reserve\n");
    if (dr_virtual_query(array, &mbi, sizeof(mbi)) != sizeof(mbi))
        dr_fprintf(STDERR, "error: unable to query prot\n");
    /* 0 is sometimes returned (see VirtualQuery docs) */
    if (mbi.Protect != PAGE_NOACCESS && mbi.Protect != 0)
        dr_fprintf(STDERR, "error: wrong reserve prot %x\n", mbi.Protect);
    if (mbi.State != MEM_RESERVE)
        dr_fprintf(STDERR, "error: memory wasn't reserved\n");

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_NON_DR |
                            DR_ALLOC_COMMIT_ONLY | DR_ALLOC_FIXED_LOCATION,
                            PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE, array);
    if (array == NULL)
        dr_fprintf(STDERR, "error: unable to commit\n");
    if (dr_virtual_query(array, &mbi, sizeof(mbi)) != sizeof(mbi))
        dr_fprintf(STDERR, "error: unable to query prot\n");
    if (mbi.Protect != PAGE_READWRITE)
        dr_fprintf(STDERR, "error: wrong commit prot %x\n", mbi.Protect);
    if (mbi.State != MEM_COMMIT || mbi.RegionSize != PAGE_SIZE)
        dr_fprintf(STDERR, "error: memory wasn't committed\n");

    write_array(array);

    ok = dr_custom_free(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_NON_DR |
                        DR_ALLOC_COMMIT_ONLY, array, PAGE_SIZE);
    if (!ok)
        dr_fprintf(STDERR, "error: failed to de-commit\n");
    if (dr_virtual_query(array, &mbi, sizeof(mbi)) != sizeof(mbi))
        dr_fprintf(STDERR, "error: unable to query prot\n");
    /* 0 is sometimes returned (see VirtualQuery docs) */
    if (mbi.Protect != PAGE_NOACCESS && mbi.Protect != 0)
        dr_fprintf(STDERR, "error: wrong decommit prot %x\n", mbi.Protect);
    if (mbi.State != MEM_RESERVE)
        dr_fprintf(STDERR, "error: memory wasn't de-committed %x\n", mbi.State);

    ok = dr_custom_free(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_NON_DR |
                        DR_ALLOC_RESERVE_ONLY, array, PAGE_SIZE*2);
    if (!ok)
        dr_fprintf(STDERR, "error: failed to un-reserve\n");
    if (dr_virtual_query(array, &mbi, sizeof(mbi)) != sizeof(mbi))
        dr_fprintf(STDERR, "error: unable to query prot\n");
    /* 0 is sometimes returned (see VirtualQuery docs) */
    if (mbi.Protect != PAGE_NOACCESS && mbi.Protect != 0)
        dr_fprintf(STDERR, "error: wrong unreserve prot %x\n", mbi.Protect);
    if (mbi.State != MEM_FREE)
        dr_fprintf(STDERR, "error: memory wasn't un-reserved\n");

    dr_fprintf(STDERR, "success\n");
}
Example #26
static
void custom_test(void)
{
    void *drcontext = dr_get_current_drcontext();
    void *array;
    size_t size;
    uint prot;

    dr_fprintf(STDERR, "  testing custom memory alloc....");

    /* test global */
    array = dr_custom_alloc(NULL, 0, SIZE, 0, NULL);
    write_array(array);
    dr_custom_free(NULL, 0, array, SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_CACHE_REACHABLE, SIZE, 0, NULL);
    ASSERT(reachable_from_client(array));
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_CACHE_REACHABLE, array, SIZE);

    /* test thread-local */
    array = dr_custom_alloc(drcontext, DR_ALLOC_THREAD_PRIVATE, SIZE, 0, NULL);
    write_array(array);
    dr_custom_free(drcontext, DR_ALLOC_THREAD_PRIVATE, array, SIZE);

    array = dr_custom_alloc(drcontext, DR_ALLOC_THREAD_PRIVATE|DR_ALLOC_CACHE_REACHABLE,
                            SIZE, 0, NULL);
    ASSERT(reachable_from_client(array));
    write_array(array);
    dr_custom_free(drcontext, DR_ALLOC_THREAD_PRIVATE|DR_ALLOC_CACHE_REACHABLE,
                   array, SIZE);

    /* test non-heap */
    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP, PAGE_SIZE,
                            DR_MEMPROT_READ|DR_MEMPROT_WRITE, NULL);
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP, array, PAGE_SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_FIXED_LOCATION, PAGE_SIZE,
                            DR_MEMPROT_READ|DR_MEMPROT_WRITE, PREFERRED_ADDR);
    ASSERT(array == (void *)PREFERRED_ADDR);
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_FIXED_LOCATION, array, PAGE_SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_CACHE_REACHABLE,
                            PAGE_SIZE, DR_MEMPROT_READ|DR_MEMPROT_WRITE, NULL);
    ASSERT(reachable_from_client(array));
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_CACHE_REACHABLE,
                   array, PAGE_SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_LOW_2GB,
                            PAGE_SIZE, DR_MEMPROT_READ|DR_MEMPROT_WRITE, NULL);
#ifdef X64
    ASSERT((ptr_uint_t)array < 0x80000000);
#endif
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_LOW_2GB, array, PAGE_SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR,
                            PAGE_SIZE, DR_MEMPROT_READ|DR_MEMPROT_WRITE, NULL);
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR,
                   array, PAGE_SIZE);

    array = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP, PAGE_SIZE,
                            DR_MEMPROT_READ|DR_MEMPROT_WRITE|DR_MEMPROT_EXEC, NULL);
    ASSERT(dr_query_memory((byte *)array, NULL, &size, &prot) &&
           size == PAGE_SIZE && prot == (DR_MEMPROT_READ|DR_MEMPROT_WRITE|
                                         DR_MEMPROT_EXEC));
    write_array(array);
    dr_custom_free(NULL, DR_ALLOC_NON_HEAP, array, PAGE_SIZE);

    dr_fprintf(STDERR, "success\n");
}
Example #27
/////////////////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char *argv[ ])
//{
 int main(void)
 {
   	sortingindex = 0;
	if((fpp = fopen(INPUT_FILE, "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 11; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				my_array[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
 

   	In_n= (int) my_array[0];
	In_vect_n= (int) my_array[1];
	Out_n= (int) my_array[2];
	Mf_n= (int) my_array[3];
	training_data_n= (int) my_array[4];
	checking_data_n= (int) my_array[5];
	epoch_n= (int) my_array[6];
	step_size=my_array[7];
	increase_rate=my_array[8];
	decrease_rate=my_array[9];
	threshold = my_array[10];
		
	Rule_n = (int)pow((double)Mf_n, (double)In_n); //number of rules 
	Node_n = In_n + In_n*Mf_n + 3*Rule_n + In_n*Rule_n + Out_n;

	/* allocate matrices and memories */
	int trnnumcheck[training_data_n + 1];
	int trnnumchecku[training_data_n + 1];
	for(i=0; i<training_data_n +1; i++)
	{
	trnnumcheck[i]=0;
	trnnumchecku[i]=0;
	}
	
	diff =(double **)create_matrix(Out_n, training_data_n, sizeof(double)); 
	double chkvar[checking_data_n];
	double cdavg[checking_data_n];
	double chkvar_un[checking_data_n];
	double cdavg_un[checking_data_n];
	target = calloc(Out_n, sizeof(double));
	de_out = calloc(Out_n, sizeof(double));
	node_p = (NODE_T **)create_array(Node_n, sizeof(NODE_T *)); 
	config = (int **)create_matrix(Node_n, Node_n, sizeof(int)); 
	training_data_matrix = (double **)create_matrix(training_data_n, In_n*In_vect_n + Out_n, sizeof(double));
	if(checking_data_n > 0)
	{
		checking_data_matrix = (double **)create_matrix(checking_data_n, In_n*In_vect_n +Out_n, sizeof(double));
		checking_data_matrix_un = (double **)create_matrix(checking_data_n, Out_n, sizeof(double));
		chk_output =  (double **)create_matrix(checking_data_n, Out_n, sizeof(double));
	}
	layer_1_to_4_output = (COMPLEX_T **)create_matrix(training_data_n, In_n*Mf_n + 3*Rule_n, sizeof(COMPLEX_T));
	trn_rmse_error = calloc(epoch_n, sizeof(double));
	trnNMSE = calloc(epoch_n, sizeof(double));
	chk_rmse_error = calloc(epoch_n, sizeof(double));
	kalman_parameter = (double **)create_matrix(Out_n ,(In_n*In_vect_n + 1)*Rule_n, sizeof(double)); 
	kalman_data = (double **)create_matrix(Out_n ,(In_n*In_vect_n + 1)*Rule_n, sizeof(double));
	step_size_array = calloc(epoch_n, sizeof(double));
	ancfis_output = (double **)create_matrix(training_data_n , Out_n, sizeof(double)); 
	trn_error =calloc(Out_n +1, sizeof(double));
	chk_error_n = calloc(Out_n +1, sizeof(double));// changing size for adding new error measures
	chk_error_un = calloc(Out_n +1, sizeof(double));// changing size for adding new error measures
	trn_datapair_error = calloc(training_data_n, sizeof(double));
	trn_datapair_error_sorted = (double **)create_matrix(2,training_data_n, sizeof(double));
	NMSE = calloc(Out_n, sizeof(double));
	NDEI = calloc(Out_n, sizeof(double));
	unNMSE = calloc(Out_n, sizeof(double));
	unNDEI = calloc(Out_n, sizeof(double));
	//Build Matrix of 0 nd 1 to show the connected nodes
	gen_config(In_n, Mf_n,Out_n, config);//gen_config.c
	//With the above matrix, build ANCFIS connected nodes
	build_ancfis(config); //datastru.c
	//Find total number of nodes in layer 1 and 5
	parameter_n = set_parameter_mode(); //datastru.c
	parameter_array = calloc(parameter_n, sizeof(double));
	initpara(TRAIN_DATA_FILE, training_data_n, In_n, In_vect_n+1, Mf_n); // initpara.c
// after this step, the parameters (present in layers 1 and 5 only) are assigned random initial values
// using some basic algebra, and these values are then stored in "para.ini"
	get_parameter(node_p,Node_n,INIT_PARA_FILE); //input.c
// after this step, the initial random parameter values are read from "para.ini" and assigned to the appropriate nodes in the node structure by accessing their para list.
	//Get training and testing data
	get_data(TRAIN_DATA_FILE, training_data_n, training_data_matrix); //input.c
// after this step, the training input data is read from the "data.trn" file and stored in the training data matrix.
	get_data(CHECK_DATA_FILE, checking_data_n, checking_data_matrix); //input.c
// after the above step, the checking data is read from the "data.chk" file and then stored in the checking data matrix.

	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<training_data_n; j++)
	{
	trnavg = trnavg + training_data_matrix[j][(i+1)*In_vect_n +i];
	}
	}
	trnavg = trnavg /(Out_n * training_data_n);
	
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<training_data_n; j++)
	{
	temp = training_data_matrix[j][(i+1)*In_vect_n +i]- trnavg;
	temp = temp*temp;
	trnvariance = trnvariance + temp;
	}
	}
	trnvariance = trnvariance /((Out_n * training_data_n)-1);

	temp = 0.0;
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j< checking_data_n; j++)
	{
	chkavg = chkavg + checking_data_matrix[j][(i+1)*In_vect_n +i];
	}
	}
	chkavg = chkavg /(Out_n * checking_data_n);
	
	for(i=0; i< Out_n; i++)
	{
	for(j=0; j<checking_data_n; j++)
	{
	temp = checking_data_matrix[j][(i+1)*In_vect_n +i]- chkavg;
	temp = temp*temp;
	chkvariance = chkvariance + temp;
	}
	}
	chkvariance = chkvariance /((Out_n * checking_data_n)-1);
	printf("epochs \t trn error \t tst error\n");
	printf("------ \t --------- \t ---------\n");
	//printf("not entering the epoch loop and the i loop yoyoyo\n");
/**************
	for(ep_n = 0; ep_n < epoch_n; ep_n++)
	{ 
		//step_size_pointer= &step_size;		
		//printf("epoch numbernumber %d \n", ep_n);	
		//step_size_array[ep_n] = step_size_pointer;
		step_size_array[ep_n] = step_size;
	// after the above step, the updated stepsize at the end of the last loop is stored in the step_size_array.
	// this will keep happening every time we start en epoch and hence at the end of the loop, step_size_array will 
	// have a list of all the updated step sizes. Since this is a offline version, step sizes are updated only
	// at the end of an epoch. 
		for(m = 0; m < Out_n; m++)
		{ 	
			//printf("m loop number %d \n", m);	
			for(j = 0; j < training_data_n; j++)
			{ 
				//printf("j loop number %d \n", j);				
				//copy the input vector(s) to input node(s)
				put_input_data(node_p,j, training_data_matrix); //input.c
	// after this(above) step, the input data is transferred frm the training data matrix to the "node" structure.
				//printf("testing \n");	
				//printf("reeeetesting \n");	
				target[m] = training_data_matrix[j][(m+1)*In_vect_n+m]; // *** 
	// this step assigns the value of the "m"th output of "j" th trainig data pair to target.
				//printf("testing \n");	
				//forward pass, get node outputs from layer 1 to layer 4
				calculate_output(In_n, In_n + In_n*Mf_n + 3*Rule_n - 1, j); //forward.c
	// after this step, output of nodes in layer 1 to 4 is calculated. Please note that when this happens for the first
	// time, i.e. when ep_n=0, our network parametrs are already initialized. thus, it is possible to get the
	// output of each node using the function definitios proposed in forward.c. After first epoch, our parametrs get 
	// updated and this output is then calculated using teh new parameters. The essential point to note here is that
	// we can always calculate the output of each node since we have already initialized our parameters.
				//printf("testing \n");	
				//put outputs of layer 1 to 4 into layer_1_to_4_output
		
				for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
				{
				//printf("testing \n");	
				layer_1_to_4_output[j][k] = *node_p[k + In_n]->value;
				}
	// the above loop simply puts the values of nodes from layer 1 to layer 4 in the layer_1_to_4_output matrix.

				//identify layer 5 params using LSE (Kalman filter)
				//printf("testing \n");	
				get_kalman_data(kalman_data, target); //kalman.c
	// this function call finds out the values of O4iXnl .. these are basically the coefficients
	// of the kalman parametrs for a given training data pair
	//puts them in kalman_data matrix.
	// this kalman_data matrix has In_n number of rows and number of columns equal to number of parametrs that are
	// responsible for determining each output... as stated above, the outputs are actually the coefficients of the
	// parameters.

				//printf("testing \n");	
				//calculate Kalman parameters
				
				kalman(ep_n, j+(m*training_data_n), m, kalman_data, kalman_parameter,target); //kalman.c
	// this function call evaluates kalman parametrs for a given output, for a given epoch.. that is it takes the epoch 
	// number from us, takes the info about how many times has kalman been invoked before, also takes in the
	// output number(row number) for whihc the parametrs are to be found out... it also takes kalman_data and reads 
	// from it to estimate the kalman parameters... it also takes target .. and stores the output in the mth row of 
	// kalman_parameter.
				//printf("testing \n");	
			}
	// let me tell u what the abopve loop is doing.. after observing closely, it is easy to see that in the above loop, 
	// for a given output node, one by one, all the training data are taken inside the ANCFIS structure, outputs
	// are calculated from one to 4, then a recursive kalman filetr is used to identify the kalman
	// parametrs corresponding to the output node.. these kalman parameters are updated after every tarining data pair 
	// and finally at the end of all the training data, we have an estimate for the kalman parametrs corresponding to 		// the output node.
		}
	// thus, at the of the above loop, the kalman parametrs for all the output nodes are evaluated...

	// now, we are ready to actually calculate the outputs.. plase remember that, all this while, the actual 
	// values of the parametrs of nodes in layer 1 and layer 5 are the ones that were randomly initialized.

		for(j = 0; j < training_data_n; j++)
		{ 
			//printf("testing 1\n");	
			put_input_data(node_p,j, training_data_matrix); //input.c
			//printf("testing 2 \n");	
			for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
			{
				*node_p[k + In_n]->value = layer_1_to_4_output[j][k];
			}
	// u must be able to see that in the above two loops, each time, whatever output we got for a given training 
	// datta pair, it was safely stored in layer_1_to_4 array...and each time, the value on the actual nodes in the
	// structure got changed.. due to new incoming training data pair..this was periodic with period trainingdata_n..
	// that is for each output node, we got the same results for a given training dat aapir.. that is the node values
	// were independent of m. Now, for a given traing data pair, we are getting those value back in the actual node 
	// node structure from that laye blh blah matrix..

			//printf("testing 3\n");	
			put_kalman_parameter(Out_n,kalman_parameter); //kalman.c
	// using this function call, we are placing the setimated value of the layer 5 parametrs in the node structure
	// by accessing each node and its parameter list.
			// Do forward pass for L5 and L6
			calculate_output(In_n + In_n*Mf_n + 3*Rule_n, Node_n, j); //forward.c
	// for a given value of the training data pair, this function calculates the output of layer 5 and layer 6 nodes 
	// and places them in the node structure.

			calculate_root(training_data_matrix,diff,j,node_p); //debug.c
	// this function call calculates the square of the erro between the predicted value of an output node and the 
	// corresponding actual value in the training data matrix..(actual output) and stores it in diff.
	// this call performs the above action for a given training data pair and for all output nodes.

			// Do backward pass and calculate all error rate for each node
			calculate_de_do(j,Out_n,target,ancfis_output); //backward.c
	// calculates de_do for each node fora given training data pair
			update_de_dp(j); //de_dp.c	
	// updates de_do for each node....
		}
	// thus at the end of this loop, estimated outputs for all the training data are calculated..also back propogatin 
	// is done and de_dp for all the nodes is updated.
		
		//printf("testing 1\n");	
		calculate_trn_err(diff,trn_error,trn_datapair_error,training_data_n); //debug.c
		//printf("testing 2 \n");	
		//training_error_measure(target,ancfis_output,diff, training_data_n, trn_error,out_n); //trn_err.c
		trn_rmse_error[ep_n] = trn_error[Out_n];
		printf("%3d \t %.11f \n", ep_n+1, trn_error[Out_n]);
		//Find RMSE of testing error
	/*************************************	if(checking_data_n != 0) 
		{
			printf("testing 3 \n");	
			epoch_checking_error(checking_data_matrix, checking_data_n, chk_error, training_data_n, chk_output, ep_n); //chk_err.c  writes to tracking.txt
			printf("testing 4 \n");	
			chk_rmse_error[ep_n] = chk_error[Out_n];
			for (i=0; i<Out_n; i++)
			//printf("%3d \t %.11f \t %.11f\n", ep_n+1, trn_error[i], chk_error[i]);
			printf("%3d \t %.11f \n", ep_n+1, trn_error[i]);
			//printf("%.11f\t %.11f\n", trn_error[Out_n],chk_error[Out_n]);
			printf("%.11f\t %.11f\n", trn_error[Out_n]);
			write_result(ep_n+1,Out_n,trn_error,chk_error);  //debug.c writes to result.txt
		} 
		else 
		{
			for (i=0; i<Out_n; i++)	
			printf("%4d \t %.11f\n", ep_n+1, trn_error[i]);
		}
***************************/

/**
		//Find minimum training error and its epoch-number
		if(trn_rmse_error[ep_n] < min_trn_RMSE) {
			min_trn_RMSE_epoch = ep_n +1;
			min_trn_RMSE = trn_rmse_error[ep_n];
			record_parameter(parameter_array);
		}

		if(ep_n < epoch_n-1)
		{ 
			//update parameters in 1st layer (Using VNCSA)
			update_parameter(1, step_size); //new_para.c
			//update stepsize
			update_step_size(trn_rmse_error, ep_n, &step_size, decrease_rate, increase_rate); //stepsize.c
		}
	}
***/
////////////////////////////////////////////////////////////

fppp = (FILE *)open_file("status.txt", "w");
fpppp = (FILE *)open_file("trn.txt", "w");


	ep_n=0;

	do
	{
		//step_size_pointer= &step_size;		
		printf("epoch numbernumber %d \n", ep_n+1);	
		//step_size_array[ep_n] = step_size_pointer;
		step_size_array[ep_n] = step_size;
	// after the above step, the updated step size at the end of the last loop is stored in the step_size_array.
	// this will keep happening every time we start an epoch, and hence at the end of the loop, step_size_array will
	// have a list of all the updated step sizes. Since this is an offline version, step sizes are updated only
	// at the end of an epoch.
		for(m = 0; m < Out_n; m++)
		{ 	
			//printf("m loop number %d \n", m);	
			for(j = 0; j < training_data_n; j++)
			{ 
				//printf("j loop number %d \n", j);				
				//copy the input vector(s) to input node(s)
				put_input_data(node_p,j, training_data_matrix); //input.c
	// after this (above) step, the input data is transferred from the training data matrix to the "node" structure.
				//printf("testing \n");
				//printf("reeeetesting \n");
				target[m] = training_data_matrix[j][(m+1)*In_vect_n+m]; // ***
	// this step assigns the value of the "m"th output of the "j"th training data pair to target.
				//printf("testing \n");	
				//forward pass, get node outputs from layer 1 to layer 4
				calculate_output(In_n, In_n + In_n*Mf_n + 3*Rule_n, j); //forward.c
	// after this step, the output of nodes in layers 1 to 4 is calculated. Please note that when this happens for the first
	// time, i.e. when ep_n=0, our network parameters are already initialized; thus, it is possible to get the
	// output of each node using the function definitions in forward.c. After the first epoch, our parameters get
	// updated and the output is then calculated using the new parameters. The essential point to note here is that
	// we can always calculate the output of each node since we have already initialized our parameters.
				//printf("testing \n");	
				//put outputs of layer 1 to 4 into layer_1_to_4_output
		
				for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
				{
				//printf("testing \n");	
				layer_1_to_4_output[j][k] = *node_p[k + In_n]->value;
				//fprintf(fppp, "%lf \t %lf \t \n", (layer_1_to_4_output[j][k]).real, (layer_1_to_4_output[j][k]).imag);
				}
	// the above loop simply puts the values of nodes from layer 1 to layer 4 in the layer_1_to_4_output matrix.

				//identify layer 5 params using LSE (Kalman filter)
				//printf("testing \n");	
				get_kalman_data(kalman_data, target); //kalman.c
	// this function call finds the values of O4iXnl; these are basically the coefficients
	// of the Kalman parameters for a given training data pair,
	// and puts them in the kalman_data matrix.
	// this kalman_data matrix has In_n rows and a number of columns equal to the number of parameters that are
	// responsible for determining each output; as stated above, the outputs are actually the coefficients of the
	// parameters.

				//printf("testing \n");	
				//calculate Kalman parameters
				
				kalman(ep_n, j+(m*training_data_n), m, kalman_data, kalman_parameter,target); //kalman.c
	// this function call evaluates the Kalman parameters for a given output, for a given epoch: it takes the epoch
	// number, the count of how many times kalman has been invoked before, and the
	// output number (row number) for which the parameters are to be found; it also reads kalman_data
	// to estimate the Kalman parameters, takes target, and stores the result in the mth row of
	// kalman_parameter.
				//printf("testing \n");	
			}
	// what the above loop is doing: for a given output node, one by one, all the training data are taken
	// through the ANCFIS structure, outputs are calculated for layers 1 to 4, then a recursive Kalman filter is used
	// to identify the Kalman parameters corresponding to the output node. These Kalman parameters are updated after
	// every training data pair, and finally, at the end of all the training data, we have an estimate of the
	// Kalman parameters corresponding to the output node.
		}
	// thus, at the end of the above loop, the Kalman parameters for all the output nodes have been evaluated.

	// now we are ready to actually calculate the outputs. Please remember that, all this while, the actual
	// values of the parameters of nodes in layers 1 and 5 are the ones that were randomly initialized.

		for(j = 0; j < training_data_n; j++)
		{ 
			//printf("testing 1\n");	
			put_input_data(node_p,j, training_data_matrix); //input.c
			//printf("testing 2 \n");	
			for(k = 0; k < Mf_n*In_n + 3*Rule_n; k++)
			{
				*node_p[k + In_n]->value = layer_1_to_4_output[j][k];
				/*if(ep_n==1)
				{
				fprintf(fppp, "%d.\t %lf \t + \t i%lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag);
				}*/
			}
	// you can see that in the above two loops, each time, whatever output we got for a given training
	// data pair was safely stored in the layer_1_to_4_output array, and each time, the values on the actual nodes in the
	// structure got changed due to the new incoming training data pair; this was periodic with period training_data_n,
	// that is, for each output node, we got the same results for a given training data pair, i.e. the node values
	// were independent of m. Now, for a given training data pair, we are copying those values back into the actual
	// node structure from the layer_1_to_4_output matrix.

			//printf("testing 3\n");	
			put_kalman_parameter(Out_n,kalman_parameter); //kalman.c
			//printf("hihahahha \n");
	// using this function call, we are placing the estimated values of the layer 5 parameters in the node structure
	// by accessing each node and its parameter list.
			// Do forward pass for L5 and L6
			calculate_output(In_n + In_n*Mf_n + 3*Rule_n, Node_n, j); //forward.c
	// for a given value of the training data pair, this function calculates the output of layer 5 and layer 6 nodes 
	// and places them in the node structure.
			//printf("hihahahha  no 2 \n");
	calculate_root(training_data_matrix,diff,j,node_p); //debug.c
	// this function call calculates the square of the error between the predicted value of an output node and the
	// corresponding actual value in the training data matrix (the actual output) and stores it in diff.
	// this call performs the above action for a given training data pair and for all output nodes.

			// Do backward pass and calculate all error rate for each node
			calculate_de_do(j,Out_n,target,ancfis_output); //backward.c
			//printf("hihahahha no 3 \n");
	// calculates de_do for each node for a given training data pair
			update_de_dp(j); //de_dp.c
	// updates de_dp for each node
		}
	// thus, at the end of this loop, estimated outputs for all the training data are calculated; back propagation
	// is also done and de_dp for all the nodes is updated.
		
		//printf("testing 1\n");	
		calculate_trn_err(diff,trn_error, trn_datapair_error, training_data_n); //debug.c
		//printf("testing 2 \n");	
		//training_error_measure(target,ancfis_output,diff, training_data_n, trn_error,out_n); //trn_err.c
		trn_rmse_error[ep_n] = trn_error[Out_n];
		trnNMSE[ep_n] = trn_rmse_error[ep_n]*trn_rmse_error[ep_n]/trnvariance;
		fprintf(fppp, "epoch number is %d \t trn RMSE is %.11f \t trn NMSE is  %lf \t \n", ep_n + 1,  trn_rmse_error[ep_n], trnNMSE[ep_n]);
		//fprintf(fpppp, "\n");
		fprintf(fpppp, "epoch number is %d \t trn RMSE is %.11f \t trn NMSE is  %lf \t \n", ep_n + 1,  trn_rmse_error[ep_n], trnNMSE[ep_n]);
		printf("trn RMSE is \t %lf \n", trn_rmse_error[ep_n]);
		printf("trn NMSE is \t %lf \n", trnNMSE[ep_n]);
		for(i=0; i<training_data_n; i++)
		{
		trn_datapair_error_sorted[0][i]=trn_datapair_error[i];
		trn_datapair_error_sorted[1][i]= i+1;
		}

		for(j=1; j<training_data_n; j++)
		{		
		for(i=0; i<training_data_n-j; i++)
		{
		if(trn_datapair_error_sorted[0][i]>trn_datapair_error_sorted[0][i+1])
		{	
		sorting=trn_datapair_error_sorted[0][i+1];
		trn_datapair_error_sorted[0][i+1]=trn_datapair_error_sorted[0][i];
		trn_datapair_error_sorted[0][i]=sorting;
		sortingindex = sorting=trn_datapair_error_sorted[1][i+1];
		trn_datapair_error_sorted[1][i+1]=trn_datapair_error_sorted[1][i];
		trn_datapair_error_sorted[1][i]=sortingindex;
		}
		}
		}

		for(j=0; j<training_data_n; j++)
		{
		fprintf(fppp, "\n");		
		fprintf(fppp, "training data pair sorted number \t %d \n", j+1);
		fprintf(fppp, "training data pair original number \t %d \n", (int)(trn_datapair_error_sorted[1][j]));
		fprintf(fppp, "training data pair sorted error in RMSE is \t %lf \n",trn_datapair_error_sorted[0][j]);
		fprintf(fpppp, "%d \t", (int)(trn_datapair_error_sorted[1][j]));
		complexsum = complex(0.0, 0.0);
		fprintf(fppp,"Normalized layer 3 outputs are as follows \n");
		for(k= In_n*Mf_n + Rule_n; k< In_n*Mf_n + 2*Rule_n; k++)
		{
		fprintf(fppp, "%d.\t %lf + i%lf \t %lf < %lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag, c_abs(layer_1_to_4_output[j][k]), c_phase(layer_1_to_4_output[j][k])*180/PI);
		complexsum = c_add(complexsum, layer_1_to_4_output[j][k]);
		}
		
		
		fprintf(fppp, "Sum of the outputs of layer 3 is \t %lf+i%lf \t %lf<%lf \n", complexsum.real, complexsum.imag, c_abs(complexsum), c_phase(complexsum)*180/PI);
		complexsum = complex(0.0, 0.0);
		fprintf(fppp,"dot producted layer 4 outputs are as follows \n");
		for(k=In_n*Mf_n + 2*Rule_n; k< In_n*Mf_n + 3*Rule_n; k++)
		{
		
		fprintf(fppp, "%d.\t %lf + i%lf \t %lf < %lf \n", k, (layer_1_to_4_output[j][k]).real,(layer_1_to_4_output[j][k]).imag, c_abs(layer_1_to_4_output[j][k]), c_phase(layer_1_to_4_output[j][k])*180/PI);
		complexsum = c_add(complexsum, layer_1_to_4_output[j][k]);
		}

		fprintf(fppp, "sum of the outputs of layer 4 is \t %lf +i%lf \t %lf<%lf \n", complexsum.real, complexsum.imag, c_abs(complexsum), c_phase(complexsum)*180/PI);
		if(j> training_data_n -6 )
		{
		trnnumcheck[(int)(trn_datapair_error_sorted[1][j])]= trnnumcheck[(int)(trn_datapair_error_sorted[1][j])] +1;
		}
		if(j<5 )
		{
		trnnumchecku[(int)(trn_datapair_error_sorted[1][j])]= trnnumchecku[(int)(trn_datapair_error_sorted[1][j])] +1;
		}

		}
		fprintf(fpppp, "\n");
		
		//Find RMSE of testing error
/********************************************************************************
if(checking_data_n != 0) 
		{
			printf("testing 3 \n");	
			epoch_checking_error(checking_data_matrix, checking_data_n, chk_error, training_data_n, chk_output, ep_n); //chk_err.c  writes to tracking.txt
			printf("testing 4 \n");	
			chk_rmse_error[ep_n] = chk_error[Out_n];
			for (i=0; i<Out_n; i++)
			printf("%3d \t %.11f \t %.11f\n", ep_n+1, trn_error[i], chk_error[i]);
			printf("%.11f\t %.11f\n", trn_error[Out_n],chk_error[Out_n]);
			write_result(ep_n+1,Out_n,trn_error,chk_error);  //debug.c writes to result.txt
		} 
		else 
		{
			for (i=0; i<Out_n; i++)	
			printf("%4d \t %.11f\n", ep_n+1, trn_error[i]);
		}
**************************************************************************************/

		// check whether the current training RMSE is less than the threshold; if so, store its epoch number and parameters
		
		if(trn_rmse_error[ep_n] < min_trn_RMSE) 
		{
			min_trn_RMSE_epoch = ep_n +1;
			min_trn_RMSE = trn_rmse_error[ep_n];
			min_trnNMSE = trnNMSE[ep_n];
			record_parameter(parameter_array);
		}

		if(ep_n < epoch_n-1)
		{ 
			//update parameters in 1st layer (Using VNCSA)
			update_parameter(1, step_size); //new_para.c
			//update stepsize
			update_step_size(trn_rmse_error, ep_n, &step_size, decrease_rate, increase_rate); //stepsize.c
		}
		ep_n++;
		
	} while((trnNMSE[ep_n -1]>= threshold) && (ep_n <= epoch_n -1));

for(i=1; i<=training_data_n; i++)
{
	fprintf(fpppp, "%d \t %d \n", i, trnnumcheck[i]);
}
for(i=1; i<=training_data_n; i++)
{
	fprintf(fpppp, "%d \t %d \n", i, trnnumchecku[i]);
}


if(trnNMSE[ep_n -1]< threshold)
{
fprintf(fppp, "\n");
fprintf(fppp, "We have gone below the threshold value \n");
fprintf(fppp, "the epoch number in which this happened is %d \n", min_trn_RMSE_epoch);
}
else
{
fprintf(fppp, "\n");
fprintf(fppp, "We exhausted the available epochs and threshold was not broken :( \n");
fprintf(fppp, "the epoch number which yielded minimum training RMSE is %d \n", min_trn_RMSE_epoch);
}


fclose(fppp);
fclose(fpppp);

double *minmaxc;
minmaxc= (double *)calloc(2*In_n, sizeof(double));
	
	if((fpp = fopen("minmax.txt", "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 2*In_n; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				minmaxc[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
//////////////////////////////////////////////////////////////


	restore_parameter(parameter_array); //output.c
	write_parameter(FINA_PARA_FILE); //output.c
	write_array(trnNMSE, epoch_n, TRAIN_ERR_FILE); //lib.c
	if (checking_data_n != 0)
	{
		//printf("testing 3 \n");	
		epoch_checking_error(checking_data_matrix, checking_data_n, chk_error_n, chk_error_un, training_data_n, chk_output, ep_n -1, minmaxc); //chk_err.c  writes to tracking.txt
		//printf("testing 4 \n");	
		//chk_rmse_error[ep_n] = chk_error[Out_n];
		min_chk_RMSE_n = chk_error_n[Out_n];
		printf(" initial checking RMSE is %lf \n ", min_chk_RMSE_n);
		min_chk_RMSE_un = chk_error_un[Out_n];
		//for (i=0; i<Out_n; i++)
		//printf("%3d \t %.11f \t %.11f\n", ep_n+1, trn_error[i], chk_error[i]);
		//printf("%3d \t %.11f \n", ep_n+1, trn_error[i]);
			//printf("%.11f\t %.11f\n", trn_error[Out_n],chk_error[Out_n]);
		//printf("%.11f\t \n", trn_error[Out_n]);
		//write_result(min_trn_RMSE_epoch ,Out_n,trn_rmse_error,chk_error);  //debug.c writes to result.txt about the epoch number at which the stopping was done and the corresponding training RMSE and checking RMSE
	} 
	//write_array(chk_rmse_error, epoch_n, CHECK_ERR_FILE); //lib.c
	//}
	
	write_array(step_size_array, epoch_n, STEP_SIZE_FILE); //lib.c

/**************************************************************************
	min_chk_RMSE = chk_rmse_error[epoch_n -1];
	min_chk_RMSE_epoch = epoch_n -1;	
	for(j=0; j< epoch_n; j++)
	{
	if(chk_rmse_error[j]< min_chk_RMSE)
	{
	min_chk_RMSE = chk_rmse_error[j];
	min_chk_RMSE_epoch = j;
	}
	}
*************************************************************************/
/**************************************************************
	double minmaxc[2*In_n];
	
	if((fpp = fopen("minmax.txt", "r")) == NULL)
	{
		printf("Cannot open 'parameter_file'.\n");
	}

	for(j = 0; j < 2*In_n; j++)
	{
		if(fscanf(fpp, "%lf", &tmp) != EOF) 
		{
				minmaxc[j] = tmp;
				
		} else 		{
			printf("Not enough data in 'input_parameter'!");
		}
	}

	fclose(fpp);
***************************************************************************/
	for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{
		 checking_data_matrix_un[j][k]= (checking_data_matrix[j][(k+1)*In_vect_n +k])* (minmaxc[(2*k) +1] - minmaxc[2*k]) + minmaxc[2*k];
		}
	}





// the following code calculates cdavg_un and the checking data average, both un-normalized
for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{
		checking_data_average_un = checking_data_average_un + checking_data_matrix_un[j][k];
		}
	cdavg_un[k]=checking_data_average_un/checking_data_n;
	checking_data_average_un_temp=checking_data_average_un_temp+checking_data_average_un;
	checking_data_average_un=0;
		}
	
	checking_data_average_un = checking_data_average_un_temp/(Out_n*checking_data_n);
	printf("%lf is the checking datat average non normalized\n", checking_data_average_un);





// the following code calculates chkvar_un, un-normalized
for(k=0; k< Out_n; k++)
	{	
	for(j=0; j< checking_data_n; j++)
		{				
		temp= temp + (checking_data_matrix_un[j][k] - cdavg_un[k])*(checking_data_matrix_un[j][k] - cdavg_un[k]);
		}
	chkvar_un[k]=temp/(checking_data_n-1);
	//checking_variance_un = checking_variance_un + temp;
	temp=0;
	}




temp =0.0;
// the following code calculates the un-normalized checking variance
for(j=0; j< checking_data_n; j++)
	{
	for(k=0; k< Out_n; k++)
		{
		temp = checking_data_matrix_un[j][k] - checking_data_average_un;
		temp = temp*temp;
		checking_variance_un = checking_variance_un + temp;
		}
	}
	checking_variance_un = checking_variance_un/((Out_n*checking_data_n)-1);
	printf("%lf is the checking variance non normalized \n", checking_variance_un);

temp =0.0;




checking_data_average_n=0.0;
checking_data_average_n_temp=0.0;
// the following code calculates the cdavg and checking data average both normalized
for(k=0; k< Out_n; k++)
	{	
for(j=0; j< checking_data_n; j++)
		{		
		checking_data_average_n = checking_data_average_n + checking_data_matrix[j][(k+1)*In_vect_n +k];
		}
		cdavg[k]=checking_data_average_n/checking_data_n;
		checking_data_average_n_temp=checking_data_average_n_temp+checking_data_average_n;
		checking_data_average_n=0;
	}
	checking_data_average_n = checking_data_average_n_temp/(Out_n*checking_data_n);
	printf("%lf is the checking datat average  normalized\n", checking_data_average_n);


temp =0.0;
checking_variance_n =0.0;
// the following code calculates the normalized checking variance
for(j=0; j< checking_data_n; j++)
	{
	for(k=0; k< Out_n; k++)
		{
		temp = checking_data_matrix[j][(k+1)*In_vect_n +k] - checking_data_average_n;
		temp = temp*temp;
		checking_variance_n = checking_variance_n + temp;
		}
	}
checking_variance_n = checking_variance_n/((Out_n*checking_data_n)-1);
temp = 0.0;
printf("%lf is the checking variance normalized \n", checking_variance_n);



// the following code calculates the normalized per-output variance chkvar[k]
temp=0.0;
for(k=0; k< Out_n; k++)
	{
	for(j=0; j< checking_data_n; j++)
		{
		temp= temp + (checking_data_matrix[j][(k+1)*In_vect_n +k] - cdavg[k])*(checking_data_matrix[j][(k+1)*In_vect_n +k] - cdavg[k]);
		}
	chkvar[k]=temp/(checking_data_n-1);
	//checking_variance_n = checking_variance_n + temp;
	temp=0;
	}


	
	
	

	NMSE_un = min_chk_RMSE_un * min_chk_RMSE_un / checking_variance_un;
	NMSE_n = min_chk_RMSE_n * min_chk_RMSE_n / checking_variance_n;
	NMSE_n2 = min_chk_RMSE_n * min_chk_RMSE_n / chkvariance;
	NDEI_un = sqrt(NMSE_un);
	NDEI_n = sqrt(NMSE_n);




	for(k=0;k<Out_n;k++)
	{
	NMSE[k]=chk_error_n[k]*chk_error_n[k]/chkvar[k];
	NDEI[k]=sqrt(NMSE[k]);
	unNMSE[k]=chk_error_un[k]*chk_error_un[k]/chkvar_un[k];
	unNDEI[k]=sqrt(unNMSE[k]);
	}
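	/* Note on the figures computed above (formulas follow directly from the code):
	 *     NMSE = RMSE^2 / variance of the checking targets,   NDEI = sqrt(NMSE)
	 * so NMSE (and NDEI) near 1 means the model predicts no better than the mean of
	 * the checking data, while values well below 1 indicate useful predictions.
	 * NMSE[k]/NDEI[k] use the per-output variances chkvar[k] and chkvar_un[k];
	 * NMSE_n/NMSE_un use the pooled variance over all outputs. */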






	write_result(min_trn_RMSE_epoch ,Out_n,trn_rmse_error,chk_error_un, chk_error_n, NDEI_un, NMSE_un, NDEI_n, NMSE_n, NMSE, NDEI, unNMSE, unNDEI); //debug.c: writes the stopping epoch and the corresponding training and checking error figures to result.txt
	printf("Minimum training RMSE is \t %f \t \n",min_trn_RMSE); 
	printf("Minimum training RMSE epoch is \t %d \n",min_trn_RMSE_epoch); 
	printf("Minimum training NMSE is \t %f \t \n",min_trnNMSE); 
	//printf("Minimum training RMSE epoch is \t %d \n",min_trnNMSE_epoch); 
	//printf("Minimum training RMSE is \t %f \t \n",min_trn_RMSE); 
	//printf("Minimum training RMSE epoch is \t %d \n",min_trn_RMSE_epoch); 
	printf("%f \t is the checking RMSE non normalized\n",min_chk_RMSE_un);
	printf("%f \t is the checking RMSE normalized\n",min_chk_RMSE_n);
	//printf("%f \t is the checking RMSE normalized22222222 \n",min_chk_RMSE_n2);
	printf(" checking NMSE non normlized is %f \t NDEI non normalized is %f \n",NMSE_un, NDEI_un); 
	printf("checking NMSE normalized is %f \t NDEI normalized is %f \n",NMSE_n, NDEI_n); 
	printf("checking NMSE2 normalized is %f \n",NMSE_n2); 
	printf("traning data variance is  %f \n",trnvariance); 
	return(0);
Example #28
static int write_image_internal (char *file, SLang_Array_Type *at,
				 int color_type,
				 void (*write_fun)(png_struct *, png_byte *p, SLindex_Type, png_byte *),
				 int flip)
{
   FILE *fp;
   Png_Type *p = NULL;
   png_struct *png;
   png_info *info;
   SLindex_Type width, height;
   png_byte **image_pointers;
   int bit_depth;
   int status = -1;
   png_byte *tmpbuf;

   bit_depth = 8;

   height = at->dims[0];
   width = at->dims[1];

   if (NULL == (image_pointers = allocate_image_pointers (height, (png_byte *)at->data, width * at->sizeof_type, flip)))
     return -1;

   if (NULL == (tmpbuf = (png_byte *)SLmalloc (4*width)))
     {
	free_image_pointers (image_pointers);
	return -1;
     }

   if (NULL == (fp = fopen (file, "wb")))
     {
	(void)SLerrno_set_errno (errno);
	SLang_verror (SL_Open_Error, "Unable to open %s", file);
	goto return_error;
     }

   if (NULL == (p = alloc_png_type ('w')))
     goto return_error;

   p->fp = fp;

   if (NULL == (p->png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)))
     {
	SLang_verror (SL_Open_Error, "png_create_write_struct failed");
	goto return_error;
     }
   png = p->png;
   if (NULL == (p->info = png_create_info_struct (png)))
     {
	SLang_verror (SL_Open_Error, "png_create_info_struct failed");
	goto return_error;
     }
   info = p->info;
   if (setjmp(png_jmpbuf(png)))
     {
	SLang_verror (SL_Write_Error, "PNG I/O error");
	goto return_error;
     }
   png_init_io(png, fp);

   png_set_IHDR (png, info, width, height,
		 bit_depth, color_type, PNG_INTERLACE_NONE,
		 PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
   png_write_info(png, info);

   if (-1 == write_array (png, image_pointers, height, width, write_fun, tmpbuf))
     goto return_error;

   png_write_end(png, NULL);
   if (EOF == fclose (p->fp))
     {
	SLang_verror (SL_Write_Error, "Error closing %s", file);
	SLerrno_set_errno (errno);
     }
   else status = 0;

   p->fp = NULL;
   /* drop */
   return_error:
   if (tmpbuf != NULL)
     SLfree ((char *) tmpbuf);
   free_image_pointers (image_pointers);
   if (p != NULL)
     free_png_type (p);

   return status;
}
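For context, write_image_internal() above follows the standard libpng write sequence. Below is a minimal, self-contained sketch of that sequence for an 8-bit grayscale image; the function name save_gray, the rows argument, and the grayscale color type are illustrative and not part of the example above, which streams each row through its write_fun callback rather than calling png_write_image().

#include <stdio.h>
#include <png.h>

/* Minimal sketch of the libpng write sequence used above (illustrative names). */
static int save_gray(const char *file, png_byte **rows, int width, int height)
{
   FILE *fp = fopen(file, "wb");
   if (fp == NULL)
     return -1;

   png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
   if (png == NULL)
     {
	fclose(fp);
	return -1;
     }
   png_infop info = png_create_info_struct(png);
   if ((info == NULL) || setjmp(png_jmpbuf(png)))
     {
	png_destroy_write_struct(&png, &info);
	fclose(fp);
	return -1;
     }

   png_init_io(png, fp);
   png_set_IHDR(png, info, width, height, 8, PNG_COLOR_TYPE_GRAY,
		PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
   png_write_info(png, info);
   png_write_image(png, rows);   /* one pointer per row, width bytes each */
   png_write_end(png, NULL);

   png_destroy_write_struct(&png, &info);
   return (fclose(fp) == EOF) ? -1 : 0;
}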
Example #29
void plist_to_bin(plist_t plist, char **plist_bin, uint32_t * length)
{
    GPtrArray *objects = NULL;
    GHashTable *ref_table = NULL;
    struct serialize_s ser_s;
    uint8_t offset_size = 0;
    uint8_t dict_param_size = 0;
    uint64_t num_objects = 0;
    uint64_t root_object = 0;
    uint64_t offset_table_index = 0;
    GByteArray *bplist_buff = NULL;
    uint64_t i = 0;
    uint8_t *buff = NULL;
    uint64_t *offsets = NULL;
    uint8_t pad[6] = { 0, 0, 0, 0, 0, 0 };
    uint8_t trailer[BPLIST_TRL_SIZE];
    //for string
    glong len = 0;
    int type = 0;
    glong items_read = 0;
    glong items_written = 0;
    GError *error = NULL;
    gunichar2 *unicodestr = NULL;

    //check for valid input
    if (!plist || !plist_bin || *plist_bin || !length)
        return;

    //list of objects
    objects = g_ptr_array_new();
    //hashtable to write only once same nodes
    ref_table = g_hash_table_new(plist_data_hash, plist_data_compare);

    //serialize plist
    ser_s.objects = objects;
    ser_s.ref_table = ref_table;
    serialize_plist(plist, &ser_s);

    //now stream to output buffer
    offset_size = 0;			//unknown yet
    dict_param_size = get_needed_bytes(objects->len);
    num_objects = objects->len;
    root_object = 0;			//root is first in list
    offset_table_index = 0;		//unknown yet

    //setup a dynamic bytes array to store bplist in
    bplist_buff = g_byte_array_new();

    //set magic number and version
    g_byte_array_append(bplist_buff, BPLIST_MAGIC, BPLIST_MAGIC_SIZE);
    g_byte_array_append(bplist_buff, BPLIST_VERSION, BPLIST_VERSION_SIZE);

    //write objects and table
    offsets = (uint64_t *) malloc(num_objects * sizeof(uint64_t));
    for (i = 0; i < num_objects; i++)
    {

        plist_data_t data = plist_get_data(g_ptr_array_index(objects, i));
        offsets[i] = bplist_buff->len;

        switch (data->type)
        {
        case PLIST_BOOLEAN:
            buff = (uint8_t *) malloc(sizeof(uint8_t));
            buff[0] = data->boolval ? BPLIST_TRUE : BPLIST_FALSE;
            g_byte_array_append(bplist_buff, buff, sizeof(uint8_t));
            free(buff);
            break;

        case PLIST_UINT:
            write_int(bplist_buff, data->intval);
            break;

        case PLIST_REAL:
            write_real(bplist_buff, data->realval);
            break;

        case PLIST_KEY:
        case PLIST_STRING:
            len = strlen(data->strval);
            if ( is_ascii_string(data->strval, len) )
            {
                write_string(bplist_buff, data->strval);
            }
            else
            {
                unicodestr = g_utf8_to_utf16(data->strval, len, &items_read, &items_written, &error);
                write_unicode(bplist_buff, unicodestr, items_written);
                g_free(unicodestr);
            }
            break;
        case PLIST_DATA:
            write_data(bplist_buff, data->buff, data->length);
            break;
        case PLIST_ARRAY:
            write_array(bplist_buff, g_ptr_array_index(objects, i), ref_table, dict_param_size);
            break;
        case PLIST_DICT:
            write_dict(bplist_buff, g_ptr_array_index(objects, i), ref_table, dict_param_size);
            break;
        case PLIST_DATE:
            write_date(bplist_buff, data->timeval.tv_sec + (double) data->timeval.tv_usec / G_USEC_PER_SEC);
            break;
        default:
            break;
        }
    }

    //free intermediate objects
    g_hash_table_foreach_remove(ref_table, free_index, NULL);
    g_ptr_array_free(objects, TRUE);
    g_hash_table_destroy(ref_table);

    //write offsets
    offset_size = get_needed_bytes(bplist_buff->len);
    offset_table_index = bplist_buff->len;
    for (i = 0; i < num_objects; i++)
    {
        uint8_t *offsetbuff = (uint8_t *) malloc(offset_size);

#if G_BYTE_ORDER == G_BIG_ENDIAN
	offsets[i] = offsets[i] << ((sizeof(uint64_t) - offset_size) * 8);
#endif

        memcpy(offsetbuff, &offsets[i], offset_size);
        byte_convert(offsetbuff, offset_size);
        g_byte_array_append(bplist_buff, offsetbuff, offset_size);
        free(offsetbuff);
    }

    //experimental pad to reflect apple's files
    g_byte_array_append(bplist_buff, pad, 6);

    //setup trailer
    num_objects = GUINT64_FROM_BE(num_objects);
    root_object = GUINT64_FROM_BE(root_object);
    offset_table_index = GUINT64_FROM_BE(offset_table_index);

    memcpy(trailer + BPLIST_TRL_OFFSIZE_IDX, &offset_size, sizeof(uint8_t));
    memcpy(trailer + BPLIST_TRL_PARMSIZE_IDX, &dict_param_size, sizeof(uint8_t));
    memcpy(trailer + BPLIST_TRL_NUMOBJ_IDX, &num_objects, sizeof(uint64_t));
    memcpy(trailer + BPLIST_TRL_ROOTOBJ_IDX, &root_object, sizeof(uint64_t));
    memcpy(trailer + BPLIST_TRL_OFFTAB_IDX, &offset_table_index, sizeof(uint64_t));

    g_byte_array_append(bplist_buff, trailer, BPLIST_TRL_SIZE);

    //duplicate buffer
    *plist_bin = (char *) malloc(bplist_buff->len);
    memcpy(*plist_bin, bplist_buff->data, bplist_buff->len);
    *length = bplist_buff->len;

    g_byte_array_free(bplist_buff, TRUE);
    free(offsets);
}
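As a reading aid for plist_to_bin() above: after the per-object data and the offset table, the code appends a 6-byte pad and then the trailer fields, presumably 32 bytes in total (assuming BPLIST_TRL_SIZE covers just the five fields written above), which matches the bplist00 trailer layout. The struct below only illustrates that byte layout as inferred from the memcpy() calls; the real code writes the fields individually at the BPLIST_TRL_* offsets, so no such struct (or packing concern) exists in libplist.

#include <stdint.h>

/* Illustrative layout of the final bytes produced above (field names are not
 * from libplist; big-endian fields are converted with GUINT64_FROM_BE first). */
struct bplist_trailer_sketch {
    uint8_t  unused[6];            /* the 6-byte pad appended before the trailer    */
    uint8_t  offset_int_size;      /* bytes per offset-table entry (offset_size)    */
    uint8_t  object_ref_size;      /* bytes per object reference (dict_param_size)  */
    uint64_t num_objects;          /* big-endian object count                       */
    uint64_t root_object;          /* big-endian index of the root node (0 here)    */
    uint64_t offset_table_offset;  /* big-endian start of the offset table          */
};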
Example #30
static int write_datum(avro_writer_t writer, const avro_encoding_t * enc,
		       avro_schema_t writers_schema, avro_datum_t datum)
{
	if (is_avro_schema(writers_schema) && is_avro_link(writers_schema)) {
		return write_datum(writer, enc,
				   (avro_schema_to_link(writers_schema))->to,
				   datum);
	}

	switch (avro_typeof(datum)) {
	case AVRO_NULL:
		return enc->write_null(writer);

	case AVRO_BOOLEAN:
		return enc->write_boolean(writer,
					  avro_datum_to_boolean(datum)->i);

	case AVRO_STRING:
		return enc->write_string(writer,
					 avro_datum_to_string(datum)->s);

	case AVRO_BYTES:
		return enc->write_bytes(writer,
					avro_datum_to_bytes(datum)->bytes,
					avro_datum_to_bytes(datum)->size);

	case AVRO_INT32:
	case AVRO_INT64:{
			int64_t val = avro_typeof(datum) == AVRO_INT32 ?
			    avro_datum_to_int32(datum)->i32 :
			    avro_datum_to_int64(datum)->i64;
			if (is_avro_schema(writers_schema)) {
				/* handle promotion */
				if (is_avro_float(writers_schema)) {
					return enc->write_float(writer,
								(float)val);
				} else if (is_avro_double(writers_schema)) {
					return enc->write_double(writer,
								 (double)val);
				}
			}
			return enc->write_long(writer, val);
		}

	case AVRO_FLOAT:{
			float val = avro_datum_to_float(datum)->f;
			if (is_avro_schema(writers_schema)
			    && is_avro_double(writers_schema)) {
				/* handle promotion */
				return enc->write_double(writer, (double)val);
			}
			return enc->write_float(writer, val);
		}

	case AVRO_DOUBLE:
		return enc->write_double(writer,
					 avro_datum_to_double(datum)->d);

	case AVRO_RECORD:
		return write_record(writer, enc,
				    avro_schema_to_record(writers_schema),
				    datum);

	case AVRO_ENUM:
		return write_enum(writer, enc,
				  avro_schema_to_enum(writers_schema),
				  avro_datum_to_enum(datum));

	case AVRO_FIXED:
		return avro_write(writer,
				  avro_datum_to_fixed(datum)->bytes,
				  avro_datum_to_fixed(datum)->size);

	case AVRO_MAP:
		return write_map(writer, enc,
				 avro_schema_to_map(writers_schema),
				 avro_datum_to_map(datum));

	case AVRO_ARRAY:
		return write_array(writer, enc,
				   avro_schema_to_array(writers_schema),
				   avro_datum_to_array(datum));

	case AVRO_UNION:
		return write_union(writer, enc,
				   avro_schema_to_union(writers_schema),
				   avro_datum_to_union(datum));

	case AVRO_LINK:
		break;
	}

	return 0;
}
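A small usage sketch of the promotion path handled in the AVRO_INT32/AVRO_INT64 case above. It assumes the legacy avro-c datum API that this file implements (avro_writer_memory, avro_write_data, avro_int32, avro_schema_double); the buffer size and the function name promotion_example are illustrative.

#include <avro.h>

/* Sketch only: writing an int32 datum against a "double" writer schema. */
static void promotion_example(void)
{
	char buf[64];
	avro_writer_t writer = avro_writer_memory(buf, sizeof(buf));
	avro_schema_t dbl = avro_schema_double();
	avro_datum_t i32 = avro_int32(42);

	/* The writer's schema is double but the datum is an int32, so write_datum()
	 * above takes the is_avro_double() promotion branch and encodes 42.0 via
	 * enc->write_double() instead of a zig-zag varint long. */
	avro_write_data(writer, dbl, i32);

	avro_datum_decref(i32);
	avro_schema_decref(dbl);
	avro_writer_free(writer);
}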