Example #1
void
write_attributes(bool all, bool ignore_delay)
{
    GHashTableIter iter;
    attribute_t *a = NULL;

    crm_debug("Writing out %s attributes", all? "all" : "changed");
    g_hash_table_iter_init(&iter, attributes);
    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
        if (!all && a->unknown_peer_uuids) {
            // Try writing this attribute again, in case peer ID was learned
            a->changed = TRUE;
        } else if (a->force_write) {
            /* If the force_write flag is set, write the attribute. */
            a->changed = TRUE;
        }

        if(all || a->changed) {
            /* When forced write flag is set, ignore delay. */
            write_attribute(a, (a->force_write ? TRUE : ignore_delay));
        } else {
            crm_trace("Skipping unchanged attribute %s", a->id);
        }
    }
}
Example #2
 bool write_value(const char* name, int value) {
     char buf[16];                             /* large enough for any 32-bit int */
     snprintf(buf, sizeof(buf), "%d", value);  /* itoa() is non-standard; snprintf is portable */
     return write_attribute(name) &&
            write(buf) &&
            next();
 }
Example #3
 array_proxy ( FileGroupType file_group, std::string const & name) :
  indexmap_ ( indexmap_type(file_group.openDataSet( name.c_str() ).getSpace(), T_is_complex) ) {
   // Note: openDataSet() in the initializer list above already throws if the
   // dataset is missing, so this check cannot be reached in that case.
   if (!h5::exists(file_group, name)) TRIQS_RUNTIME_ERROR << " h5 : no dataset " << name << " in file ";
   storage_ = std::make_pair( boost::make_shared<FileGroupType>(file_group), name);
   DataSet dataset = file_group.openDataSet( name.c_str() );
   try { if (T_is_complex) write_attribute(dataset, "__complex__", "1"); }
   catch (...) {} // ignore the error if the attribute already exists
  }
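The write_attribute() helper these TRIQS snippets call is not shown. A minimal sketch of what such a helper could look like with the HDF5 C++ API (the signature and error handling here are assumptions, not the actual TRIQS implementation):

#include <H5Cpp.h>
#include <cstring>

// Hypothetical helper: attach a fixed-length C-string attribute to a dataset.
void write_attribute(H5::DataSet & ds, const char * name, const char * value) {
    H5::StrType strtype(H5::PredType::C_S1, std::strlen(value) + 1);
    H5::Attribute attr = ds.createAttribute(name, strtype, H5::DataSpace(H5S_SCALAR));
    attr.write(strtype, value);
}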
Example #4
static void
attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
    int level = LOG_ERR;
    GHashTableIter iter;
    const char *peer = NULL;
    attribute_value_t *v = NULL;

    char *name = user_data;
    attribute_t *a = g_hash_table_lookup(attributes, name);

    if(a == NULL) {
        crm_info("Attribute %s no longer exists", name);
        goto done;
    }

    a->update = 0;
    if (rc == pcmk_ok && call_id < 0) {
        rc = call_id;
    }

    switch (rc) {
        case pcmk_ok:
            level = LOG_INFO;
            last_cib_op_done = call_id;
            break;
        case -pcmk_err_diff_failed:    /* When an attr changes while the CIB is syncing */
        case -ETIME:           /* When an attr changes while there is a DC election */
        case -ENXIO:           /* When an attr changes while the CIB is syncing a
                                *   newer config from a node that just came up
                                */
            level = LOG_WARNING;
            break;
    }

    do_crm_log(level, "Update %d for %s: %s (%d)", call_id, name, pcmk_strerror(rc), rc);

    g_hash_table_iter_init(&iter, a->values);
    while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) {
        crm_notice("Update %d for %s[%s]=%s: %s (%d)", call_id, a->id, peer, v->requested, pcmk_strerror(rc), rc);

        if(rc == pcmk_ok) {
            free(v->stored);
            v->stored = v->requested;
            v->requested = NULL;

        } else {
            free(v->requested);
            v->requested = NULL;
            a->changed = TRUE; /* Attempt write out again */
        }
    }
  done:
    free(name);
    if(a && a->changed && election_state(writer) == election_won) {
        write_attribute(a);
    }
}
Example #5
void
write_or_elect_attribute(attribute_t *a)
{
    if (attrd_election_won()) {
        write_attribute(a, FALSE);
    } else {
        attrd_start_election_if_needed();
    }
}
Example #6
   array_proxy ( FileGroupType file_group, std::string const & name_, LengthType L, bool overwrite = false) :
    indexmap_ ( indexmap_type (L) ) 
 { 
  if (h5::exists(file_group, name_)) {
   if (overwrite) file_group.unlink(name_.c_str());  
   else TRIQS_RUNTIME_ERROR<< " h5 : dataset"<< name_ << " already exists in the file "; 
  }
  storage_ = std::make_pair( boost::make_shared<FileGroupType>(file_group),name_);
  DataSpace ds  = indexmap_.template dataspace<T_is_complex>(); //(indexmap_type::rank_full, &indexmap_.lengths()[0], &indexmap_.strides()[0]  );
  DataSet dataset = file_group.createDataSet( name_.c_str(), data_type_file(ValueType()), ds);
  if (T_is_complex) write_attribute(dataset,"__complex__","1");
 }
Example #7
void gatt_write_attribute(GDBusProxy *proxy, const char *arg)
{
	const char *iface;

	iface = g_dbus_proxy_get_interface(proxy);
	if (!strcmp(iface, "org.bluez.GattCharacteristic1") ||
				!strcmp(iface, "org.bluez.GattDescriptor1")) {
		write_attribute(proxy, (char *) arg);
		return;
	}

	rl_printf("Unable to write attribute %s\n",
						g_dbus_proxy_get_path(proxy));
}
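A plausible call site, modeled on bluetoothctl's command handlers (default_attr and cmd_write are illustrative assumptions, not verbatim BlueZ source):

static GDBusProxy *default_attr; /* assumed: the currently selected GATT attribute */

static void cmd_write(const char *arg)
{
	if (!default_attr) {
		rl_printf("No attribute selected\n");
		return;
	}
	gatt_write_attribute(default_attr, arg);
}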
Example #8
void
write_or_elect_attribute(attribute_t *a)
{
    enum election_result rc = election_state(writer);
    if(rc == election_won) {
        write_attribute(a);

    } else if(rc == election_in_progress) {
        crm_trace("Election in progress to determine who will write out %s", a->id);

    } else if(peer_writer == NULL) {
        crm_info("Starting an election to determine who will write out %s", a->id);
        election_vote(writer);

    } else {
        crm_trace("%s will write out %s, we are in state %d", peer_writer, a->id, rc);
    }
}
Example #9
 void write_array (group_or_file f, std::string const & name, ArrayType const & A, bool C_reorder = true) {
  try {
   if (h5::exists(f, name)) f->unlink( name.c_str());  // put some option here ?
   DataSet ds;
   if (C_reorder) {
    BOOST_AUTO(C, make_const_cache(A,Option::C()));
    ds = f->createDataSet( name.c_str(), data_type_file(typename ArrayType::value_type()), data_space(C.view()) );
    ds.write( data(C.view()), data_type_mem(A), data_space(C.view()) );
   }
   else {
    ds = f->createDataSet( name.c_str(), data_type_file(typename ArrayType::value_type()), data_space(A) );
    ds.write( data(A), data_type_mem(A), data_space(A) );
   }
   // if complex, to be python compatible, we add the __complex__ attribute
   if (boost::is_complex<typename ArrayType::value_type>::value)  write_attribute(ds,"__complex__","1");
  }
  TRIQS_ARRAYS_H5_CATCH_EXCEPTION;
 }
Example #10
int main(int argc, char** argv)
{
    std::string filename = argv[0];
    filename.append(".h5");

    // set-up data as Boost.MultiArray
    array_t array(boost::extents[2][3][2]);
    int data[] = {99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, 88};
    array.assign(data, data + sizeof(data) / sizeof(int));

    // write to HDF5 file
    write_attribute(filename, array);

    // read from HDF5 file
    read_attribute(filename);

    return 0;
}
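array_t is not defined in this snippet; given the boost::extents[2][3][2] initializer and the int data, a plausible definition is a 3-D Boost.MultiArray:

#include <boost/multi_array.hpp>

typedef boost::multi_array<int, 3> array_t; // assumed: 2 x 3 x 2 ints, matching the extents above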
Example #11
void
write_attributes(bool all, bool peer_discovered)
{
    GHashTableIter iter;
    attribute_t *a = NULL;

    g_hash_table_iter_init(&iter, attributes);
    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
        if (peer_discovered && a->unknown_peer_uuids) {
            /* a new peer uuid has been discovered, try writing this attribute again. */
            a->changed = TRUE;
        }

        if(all || a->changed) {
            write_attribute(a);
        } else {
            crm_debug("Skipping unchanged attribute %s", a->id);
        }
    }
}
Example #12
/*
 * In a group, creates NDATASETS datasets.  Each process writes a hyperslab
 * of a data array to the file.
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/16/04
 */
void write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
    int i, j, n, size;
    int mpi_rank, mpi_size;
    char dname[32];
    DATATYPE * outme = NULL;
    hid_t did;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    outme = HDmalloc((size_t)(size * size * sizeof(double)));
    VRFY((outme != NULL), "HDmalloc succeeded for outme");

    for(n=0; n < NDATASET; n++) {
         sprintf(dname, "dataset%d", n);
         did = H5Dcreate(gid, dname, H5T_NATIVE_INT, filespace,
                         H5P_DEFAULT);
         VRFY((did > 0), dname);

         for(i=0; i < size; i++)
             for(j=0; j < size; j++)
     	         outme[(i * size) + j] = n*1000 + mpi_rank;

         H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
                  outme);

         /* create attribute for these datasets.*/
         write_attribute(did, is_dset, n);

         H5Dclose(did);
    }
    HDfree(outme);
}
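The write_attribute(did, is_dset, n) call relies on a helper from the surrounding test harness that is not shown. A rough sketch of attaching a scalar integer attribute with the same 1.6-era C API used above (the name and signature are illustrative, not the real helper):

static void write_int_attribute(hid_t obj_id, const char *name, int value)
{
    hid_t sid = H5Screate(H5S_SCALAR);
    hid_t aid = H5Acreate(obj_id, name, H5T_NATIVE_INT, sid, H5P_DEFAULT);

    H5Awrite(aid, H5T_NATIVE_INT, &value);
    H5Aclose(aid);
    H5Sclose(sid);
}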
Example #13
 bool end_list() {
     return write_attribute("_") &&
            write_quoted("");
 }
Example #14
 bool write_string(const char* name, const char* value) {
     return write_attribute(name) &&
            write_quoted(value) &&
            next();
 }
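Examples #2, #13 and #14 read like pieces of one streaming key/value serializer. A hypothetical composition (write_person and its field names are invented for illustration):

 bool write_person(const char* name, int age) {
     return write_string("name", name) &&  /* quoted string field */
            write_value("age", age) &&     /* numeric field */
            end_list();                    /* terminate the record */
 }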
Example #15
static void
attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
{
    int level = LOG_ERR;
    GHashTableIter iter;
    const char *peer = NULL;
    attribute_value_t *v = NULL;

    char *name = user_data;
    attribute_t *a = g_hash_table_lookup(attributes, name);

    if(a == NULL) {
        crm_info("Attribute %s no longer exists", name);
        return;
    }

    a->update = 0;
    if (rc == pcmk_ok && call_id < 0) {
        rc = call_id;
    }

    switch (rc) {
        case pcmk_ok:
            level = LOG_INFO;
            last_cib_op_done = call_id;
            if (a->timer && !a->timeout_ms) {
                // Remove temporary dampening for failed writes
                mainloop_timer_del(a->timer);
                a->timer = NULL;
            }
            break;

        case -pcmk_err_diff_failed:    /* When an attr changes while the CIB is syncing */
        case -ETIME:           /* When an attr changes while there is a DC election */
        case -ENXIO:           /* When an attr changes while the CIB is syncing a
                                *   newer config from a node that just came up
                                */
            level = LOG_WARNING;
            break;
    }

    do_crm_log(level, "CIB update %d result for %s: %s " CRM_XS " rc=%d",
               call_id, a->id, pcmk_strerror(rc), rc);

    g_hash_table_iter_init(&iter, a->values);
    while (g_hash_table_iter_next(&iter, (gpointer *) & peer, (gpointer *) & v)) {
        do_crm_log(level, "* %s[%s]=%s", a->id, peer, v->requested);
        free(v->requested);
        v->requested = NULL;
        if (rc != pcmk_ok) {
            a->changed = TRUE; /* Attempt write out again */
        }
    }

    if (a->changed && attrd_election_won()) {
        if (rc == pcmk_ok) {
            /* We deferred a write of a new update because this update was in
             * progress. Write out the new value without additional delay.
             */
            write_attribute(a, FALSE);

        /* We're re-attempting a write because the original failed; delay
         * the next attempt so we don't potentially flood the CIB manager
         * and logs with a zillion attempts per second.
         *
         * @TODO We could elect a new writer instead. However, we'd have to
         * somehow downgrade our vote, and we'd still need something like this
         * if all peers similarly fail to write this attribute (which may
         * indicate a corrupted attribute entry rather than a CIB issue).
         */
        } else if (a->timer) {
            // Attribute has a dampening value, so use that as delay
            if (!mainloop_timer_running(a->timer)) {
                crm_trace("Delayed re-attempted write (%dms) for %s",
                          a->timeout_ms, name);
                mainloop_timer_start(a->timer);
            }
        } else {
            /* Set a temporary dampening of 2 seconds (timer will continue
             * to exist until the attribute's dampening gets set or the
             * write succeeds).
             */
            a->timer = mainloop_timer_add(a->id, 2000, FALSE,
                                          attribute_timer_cb, a);
            mainloop_timer_start(a->timer);
        }
    }
}
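attribute_timer_cb is registered above but not shown; presumably it retries the write once the dampening interval expires. A minimal sketch under that assumption (the actual Pacemaker callback may differ):

static gboolean
attribute_timer_cb(gpointer data)
{
    attribute_t *a = data;

    write_or_elect_attribute(a); /* assumption: retry, re-checking writer status */
    return FALSE;                /* one-shot; the timer is not rescheduled here */
}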
Example #16
/*
 * Example of using PHDF5 to create multiple groups.  Under the root group,
 * it creates ngroups groups.  Under the first group just created, it creates
 * recursive subgroups of depth GROUP_DEPTH.  In each created group, it
 *  generates NDATASETS datasets.  Each process writes a hyperslab of an array
 * into the file.  The structure is like
 *
 *                             root group
 *                                 |
 *            ---------------------------- ... ... ------------------------
 *           |          |         |        ... ...  |                      |
 *       group0*+'   group1*+' group2*+'   ... ...             group ngroups*+'
 *           |
 *      1st_child_group*'
 *           |
 *      2nd_child_group*'
 *           |
 *           :
 *           :
 *           |
 * GROUP_DEPTHth_child_group*'
 *
 *      * means the group has dataset(s).
 *      + means the group has attribute(s).
 *      ' means the datasets in the groups have attribute(s).
 *
 * Changes:     Updated function to use a dynamically calculated size,
 *              instead of the old SIZE #define.  This should allow it
 *              to function with an arbitrary number of processors.
 *
 *                                              JRM - 8/16/04
 */
void multiple_group_write(void)
{
    int mpi_rank, mpi_size, size;
    int m;
    hbool_t use_gpfs = FALSE;
    char gname[64];
    hid_t fid, gid, plist, memspace, filespace;
    hsize_t chunk_origin[DIM];
    hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
    herr_t ret;
    const H5Ptest_param_t *pt;
    char	*filename;
    int		ngroups;

    pt = GetTestParameters();
    filename = pt->name;
    ngroups = pt->count;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    size = get_size();

    plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    /* decide the hyperslab according to process number. */
    get_slab(chunk_origin, chunk_dims, count, file_dims, size);

    /* select hyperslab in memory and file spaces.  These two operations are
     * identical since the datasets are the same. */
    memspace  = H5Screate_simple(DIM, file_dims, NULL);
    VRFY((memspace>=0), "memspace");
    ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    VRFY((ret>=0), "mgroup memspace selection");

    filespace = H5Screate_simple(DIM, file_dims,  NULL);
    VRFY((filespace>=0), "filespace");
    ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
                               chunk_dims, count, chunk_dims);
    VRFY((ret>=0), "mgroup filespace selection");

    /* creates ngroups groups under the root group, writes datasets in
     * parallel. */
    for(m = 0; m < ngroups; m++) {
        sprintf(gname, "group%d", m);
        gid = H5Gcreate(fid, gname, 0);
        VRFY((gid > 0), gname);

        /* create attribute for these groups. */
	write_attribute(gid, is_group, m);

        if(m != 0)
	    write_dataset(memspace, filespace, gid);

        H5Gclose(gid);

#ifdef BARRIER_CHECKS
        if(! ((m+1) % 10)) {
            printf("created %d groups\n", m+1);
            MPI_Barrier(MPI_COMM_WORLD);
	}
#endif /* BARRIER_CHECKS */
    }

    /* recursively creates subgroups under the first group. */
    gid = H5Gopen(fid, "group0");
    create_group_recursive(memspace, filespace, gid, 0);
    ret = H5Gclose(gid);
    VRFY((ret>=0), "H5Gclose");

    ret = H5Sclose(filespace);
    VRFY((ret>=0), "H5Sclose");
    ret = H5Sclose(memspace);
    VRFY((ret>=0), "H5Sclose");
    ret = H5Fclose(fid);
    VRFY((ret>=0), "H5Fclose");
}