Example #1
void pyne::Material::_load_comp_protocol1(H5::H5File * db, std::string datapath, int row)
{
  H5::DataSet data_set = (*db).openDataSet(datapath);

  hsize_t data_offset[1] = {static_cast<hsize_t>(row)};
  if (row < 0)
  {
    // Handle negative row indices by wrapping them around the table length
    H5::DataSpace data_space = data_set.getSpace();
    hsize_t data_dims[1];
    data_space.getSimpleExtentDims(data_dims);
    data_offset[0] += data_dims[0];
  }

  // Grab the nucpath
  std::string nucpath;
  H5::Attribute nuc_attr = data_set.openAttribute("nucpath");
  hsize_t nuc_attr_len = nuc_attr.getStorageSize() / sizeof(char);
  H5::StrType nuc_attr_type(0, nuc_attr_len);
  nuc_attr.read(nuc_attr_type, nucpath);

  // Grab the nuclides
  std::vector<int> nuclides = h5wrap::h5_array_to_cpp_vector_1d<int>(db, nucpath, H5::PredType::NATIVE_INT);
  int nuc_size = nuclides.size();
  hsize_t nuc_dims[1] = {static_cast<hsize_t>(nuc_size)};

  // Get the data hyperslab
  H5::DataSpace data_hyperslab = data_set.getSpace();
  hsize_t data_count[1] = {1};
  data_hyperslab.selectHyperslab(H5S_SELECT_SET, data_count, data_offset);

  // Get a memory space for reading
  H5::DataSpace mem_space (1, data_count);

  // Get material type
  size_t material_struct_size = sizeof(pyne::material_struct) + sizeof(double)*(nuc_size);
  H5::CompType data_desc(material_struct_size);
  H5::ArrayType comp_values_array_type (H5::PredType::NATIVE_DOUBLE, 1, nuc_dims);

  // make the data table type
  data_desc.insertMember("name", HOFFSET(pyne::material_struct, name), H5::StrType(0, 20));
  data_desc.insertMember("mass", HOFFSET(pyne::material_struct, mass), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("atoms_per_mol", HOFFSET(pyne::material_struct, atoms_per_mol), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("comp", HOFFSET(pyne::material_struct, comp), comp_values_array_type);

  // make the data array, have to over-allocate
  material_struct * mat_data = (material_struct *) malloc(material_struct_size);

  // Finally, get data and put in on this instance
  data_set.read(mat_data, data_desc, mem_space, data_hyperslab);

  name = std::string((*mat_data).name);
  mass = (*mat_data).mass;
  atoms_per_mol = (*mat_data).atoms_per_mol;
  for (int i = 0; i < nuc_size; i++)
    comp[nuclides[i]] = (double) (*mat_data).comp[i];

  free(mat_data);
}
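
/*
 * Usage sketch (an assumption, not part of the original source): drive the
 * protocol-1 loader directly.  The file name and data path are hypothetical,
 * and _load_comp_protocol1() is assumed to be callable from this context;
 * in practice it is reached through a higher-level from_hdf5()-style entry
 * point.
 */
void example_load_last_material_row()
{
  pyne::Material mat;
  H5::H5File db("materials.h5", H5F_ACC_RDONLY);
  mat._load_comp_protocol1(&db, "/material", -1);  // row -1 loads the last row
  db.close();
}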
Example #2
/*
 * Validate the descriptor 'seg_desc' associated with 'segment'.
 *
 * Returns 0 on success.
 * Returns 1 if an exception was injected into the guest.
 * Returns -1 otherwise.
 */
static int
validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
    int segment, struct seg_desc *seg_desc)
{
	struct vm_guest_paging sup_paging;
	struct user_segment_descriptor usd;
	int error, idtvec;
	int cpl, dpl, rpl;
	uint16_t sel, cs;
	bool ldtseg, codeseg, stackseg, dataseg, conforming;

	ldtseg = codeseg = stackseg = dataseg = false;
	switch (segment) {
	case VM_REG_GUEST_LDTR:
		ldtseg = true;
		break;
	case VM_REG_GUEST_CS:
		codeseg = true;
		break;
	case VM_REG_GUEST_SS:
		stackseg = true;
		break;
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
		dataseg = true;
		break;
	default:
		assert(0);
	}

	/* Get the segment selector */
	sel = GETREG(ctx, vcpu, segment);

	/* LDT selector must point into the GDT */
	if (ldtseg && ISLDT(sel)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* Descriptor table limit check */
	if (desc_table_limit_check(ctx, vcpu, sel)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* NULL selector */
	if (IDXSEL(sel) == 0) {
		/* Code and stack segment selectors cannot be NULL */
		if (codeseg || stackseg) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
		seg_desc->base = 0;
		seg_desc->limit = 0;
		seg_desc->access = 0x10000;	/* unusable */
		return (0);
	}

	/* Read the descriptor from the GDT/LDT */
	sup_paging = ts->paging;
	sup_paging.cpl = 0;	/* implicit supervisor mode */
	error = desc_table_read(ctx, vcpu, &sup_paging, sel, &usd);
	if (error)
		return (error);

	/* Verify that the descriptor type is compatible with the segment */
	if ((ldtseg && !ldt_desc(usd.sd_type)) ||
	    (codeseg && !code_desc(usd.sd_type)) ||
	    (dataseg && !data_desc(usd.sd_type)) ||
	    (stackseg && !stack_desc(usd.sd_type))) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	/* Segment must be marked present */
	if (!usd.sd_p) {
		if (ldtseg)
			idtvec = IDT_TS;
		else if (stackseg)
			idtvec = IDT_SS;
		else
			idtvec = IDT_NP;
		sel_exception(ctx, vcpu, idtvec, sel, ts->ext);
		return (1);
	}

	cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
	cpl = cs & SEL_RPL_MASK;
	rpl = sel & SEL_RPL_MASK;
	dpl = usd.sd_dpl;

	if (stackseg && (rpl != cpl || dpl != cpl)) {
		sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
		return (1);
	}

	if (codeseg) {
		conforming = (usd.sd_type & 0x4) ? true : false;
		if ((conforming && (cpl < dpl)) ||
		    (!conforming && (cpl != dpl))) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
	}

	if (dataseg) {
		/*
		 * A data segment is always non-conforming except when its
		 * descriptor is a readable, conforming code segment.
		 */
		if (code_desc(usd.sd_type) && (usd.sd_type & 0x4) != 0)
			conforming = true;
		else
			conforming = false;

		if (!conforming && (rpl > dpl || cpl > dpl)) {
			sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
			return (1);
		}
	}
	*seg_desc = usd_to_seg_desc(&usd);
	return (0);
}
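
/*
 * Caller sketch (an assumption, not part of the original source): a
 * hypothetical helper showing how the 0 / 1 / -1 contract documented in the
 * function header might be consumed when validating the new task's %cs.
 * The surrounding task-switch emulation is assumed to supply 'ts' and
 * 'seg_desc'.
 */
static int
example_validate_new_cs(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
    struct seg_desc *seg_desc)
{
	int error;

	error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_CS, seg_desc);
	if (error)
		return (error);	/* 1: exception injected, -1: fatal error */

	/* seg_desc now holds the validated descriptor for the new %cs */
	return (0);
}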
Example #3
void pyne::Material::write_hdf5(std::string filename, std::string datapath, std::string nucpath, float row, int chunksize)
{
  // Turn off annoying HDF5 errors
  H5::Exception::dontPrint();

  // Create new/open datafile.
  H5::H5File db;
  if (pyne::file_exists(filename))
  {
    bool isH5 = H5::H5File::isHdf5(filename);
    if (!isH5)
      throw h5wrap::FileNotHDF5(filename);
    db = H5::H5File(filename, H5F_ACC_RDWR);
  }
  else
    db = H5::H5File(filename, H5F_ACC_TRUNC);

  //
  // Read in nuclist if available, write it out if not
  //
  bool nucpath_exists = h5wrap::path_exists(&db, nucpath);
  std::vector<int> nuclides;
  int nuc_size;
  hsize_t nuc_dims[1];
  
  if (nucpath_exists)
  {
    nuclides = h5wrap::h5_array_to_cpp_vector_1d<int>(&db, nucpath, H5::PredType::NATIVE_INT);
    nuc_size = nuclides.size();
    nuc_dims[0] = nuc_size;
  }
  else
  {
    nuclides = std::vector<int>();
    for (pyne::comp_iter i = comp.begin(); i != comp.end(); i++)
      nuclides.push_back(i->first);
    nuc_size = nuclides.size();

    // Create the nuclide list in the file, since it doesn't exist yet.
    // Write straight from the contiguous std::vector instead of copying
    // into a (non-standard) variable-length array.
    nuc_dims[0] = nuc_size;
    H5::DataSpace nuc_space(1, nuc_dims);
    H5::DataSet nuc_set = db.createDataSet(nucpath, H5::PredType::NATIVE_INT, nuc_space);
    nuc_set.write(&nuclides[0], H5::PredType::NATIVE_INT);
    db.flush(H5F_SCOPE_GLOBAL);
  }



  //
  // Write out to the file
  //
  H5::DataSet data_set;
  H5::DataSpace data_space, data_hyperslab;
  int data_rank = 1;
  hsize_t data_dims[1] = {1};
  hsize_t data_max_dims[1] = {H5S_UNLIMITED};
  hsize_t data_offset[1] = {0};

  size_t material_struct_size = sizeof(pyne::material_struct) + sizeof(double)*(nuc_size);
  H5::CompType data_desc(material_struct_size);
  H5::ArrayType comp_values_array_type (H5::PredType::NATIVE_DOUBLE, 1, nuc_dims);

  // make the data table type
  data_desc.insertMember("name", HOFFSET(pyne::material_struct, name), H5::StrType(0, 20));
  data_desc.insertMember("mass", HOFFSET(pyne::material_struct, mass), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("atoms_per_mol", HOFFSET(pyne::material_struct, atoms_per_mol), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("comp", HOFFSET(pyne::material_struct, comp), comp_values_array_type);

  // make the data array, have to over-allocate
  material_struct * mat_data  = (material_struct *) malloc(material_struct_size);
  int name_len = name.length();
  for (int i = 0; i < 20; i++)
  {
    if (i < name_len)
      (*mat_data).name[i] = name[i];
    else
      (*mat_data).name[i] = '\0';  // pad the fixed-width name with NULs
  }
  (*mat_data).mass = mass;
  (*mat_data).atoms_per_mol = atoms_per_mol;
  for (int n = 0; n != nuc_size; n++)
  {
    if (0 < comp.count(nuclides[n]))
      (*mat_data).comp[n] = comp[nuclides[n]];
    else
      (*mat_data).comp[n] = 0.0;
  }

  // get / make the data set
  bool datapath_exists = h5wrap::path_exists(&db, datapath);
  if (datapath_exists)
  {
    data_set = db.openDataSet(datapath);
    data_space = data_set.getSpace();
    data_rank = data_space.getSimpleExtentDims(data_dims, data_max_dims);

    // Determine the row size.
    int row_num = (int) row;

    if (std::signbit(row))
      row_num = data_dims[0] + row;  // careful, row is negative

    // A row_num that is still negative after adjustment is out of bounds.
    if (row_num < 0)
      throw h5wrap::HDF5BoundsError();

    if (data_dims[0] <= (hsize_t) row_num)
    {
      // row == -0 means append: extend the data set by one row.  A row_num
      // past the current extent likewise requires resizing to accommodate it.
      data_dims[0] = row_num + 1;
      data_set.extend(data_dims);
    }

    data_offset[0] = row_num;
  }
  else
  {
    // Get full space
    data_space = H5::DataSpace(1, data_dims, data_max_dims);

    // Make data set properties to enable chunking
    H5::DSetCreatPropList data_set_params;
    hsize_t chunk_dims[1] = {(hsize_t) chunksize};
    data_set_params.setChunk(1, chunk_dims);

    material_struct * data_fill_value = (material_struct *) malloc(material_struct_size);
    for (int i = 0; i < 20; i++)
      (*data_fill_value).name[i] = '\0';
    (*data_fill_value).mass = -1.0;
    (*data_fill_value).atoms_per_mol = -1.0;
    for (int n = 0; n != nuc_size; n++)
      (*data_fill_value).comp[n] = 0.0;
    // setFillValue() expects a pointer to the fill-value buffer itself.
    data_set_params.setFillValue(data_desc, data_fill_value);

    // Create the data set
    data_set = db.createDataSet(datapath, data_desc, data_space, data_set_params);
    data_set.extend(data_dims);

    // Add attribute pointing to nuc path
    H5::StrType nuc_attr_type(0, nucpath.length());
    H5::DataSpace nuc_attr_space(H5S_SCALAR);
    H5::Attribute nuc_attr = data_set.createAttribute("nucpath", nuc_attr_type, nuc_attr_space);
    nuc_attr.write(nuc_attr_type, nucpath);

    // Remember to de-allocate
    free(data_fill_value);
  }

  // Get the data hyperslab
  data_hyperslab = data_set.getSpace();
  hsize_t data_count[1] = {1};
  data_hyperslab.selectHyperslab(H5S_SELECT_SET, data_count, data_offset);

  // Get a memory space for writing
  H5::DataSpace mem_space (1, data_count, data_max_dims);

  // Write the row...
  data_set.write(mat_data, data_desc, mem_space, data_hyperslab);

  // Close out the HDF5 file
  db.close();

  // Remember the milk!  
  // ...by which I mean to deallocate
  free(mat_data);
}
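
/*
 * Usage sketch (an assumption, not part of the original source): write a
 * material out to a hypothetical file.  Passing row = -0.0 takes the
 * signbit() branch above and appends the material as a new row when the
 * data set already exists (it creates the set otherwise); the data and
 * nuclide paths shown are placeholders.
 */
void example_append_material(pyne::Material & mat)
{
  mat.write_hdf5("materials.h5", "/material", "/nucid", -0.0, 100);
}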