unsigned long
Generic_obj_space<SPACE>::v_delete(Page_number virt, Size size,
                                   unsigned long page_attribs = L4_fpage::CRWSD)
{
  (void)size;
  assert (size.value() == 1);

  Entry *c;
  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt.value());
      if (!c)
	return 0;

      Capability cap = Mem_layout::read_special_safe((Capability*)c);
      if (!cap.valid())
	return 0;
    }
  else
    c = get_cap(virt.value());

  if (c && c->valid())
    {
      if (page_attribs & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(page_attribs & L4_fpage::CWSD);
    }

  return 0;
}
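// Look up the capability entry at index 'virt': fills *phys with the stored
// object and, for a valid entry, *attribs with its rights; *size becomes 1,
// or Caps_per_page when no capability page is mapped there at all.
// Returns true iff the entry is valid.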
bool
Generic_obj_space<SPACE>::v_lookup(Addr const &virt, Phys_addr *phys = 0,
                                   Size *size = 0, unsigned *attribs = 0)
{
  if (size) size->set_value(1);
  Entry *cap;

  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    cap = cap_virt(virt.value());
  else
    cap = get_cap(virt.value());

  if (EXPECT_FALSE(!cap))
    {
      if (size) size->set_value(Caps_per_page);
      return false;
    }

  if (Optimize_local)
    {
      Capability c = Mem_layout::read_special_safe((Capability*)cap);

      if (phys) *phys = c.obj();
      if (c.valid() && attribs) *attribs = c.rights();
      return c.valid();
    }
  else
    {
      Obj::set_entry(virt, cap);
      if (phys) *phys = cap->obj();
      if (cap->valid() && attribs) *attribs = cap->rights();
      return cap->valid();
    }
}
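// Return all capability-table pages of this space to the quota-charged
// allocator and tear down the page tables covering the capability area.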
void
Generic_obj_space<SPACE>::caps_free()
{
  Mem_space *ms = mem_space();
  if (EXPECT_FALSE(!ms || !ms->dir()))
    return;

  Mapped_allocator *a = Mapped_allocator::allocator();
  for (unsigned long i = 0; i < map_max_address().value();
       i += Caps_per_page)
    {
      Entry *c = get_cap(i);
      if (!c)
	continue;

      Address cp = Address(ms->virt_to_phys(Address(c)));
      assert_kdb (cp != ~0UL);
      void *cv = (void*)Mem_layout::phys_to_pmem(cp);
      remove_dbg_info(cv);

      a->q_unaligned_free(ram_quota(), Config::PAGE_SIZE, cv);
    }
#if defined (CONFIG_ARM)
  ms->dir()->free_page_tables((void*)Mem_layout::Caps_start, (void*)Mem_layout::Caps_end);
#else
  ms->dir()->Pdir::alloc_cast<Mem_space_q_alloc>()
    ->destroy(Virt_addr(Mem_layout::Caps_start),
              Virt_addr(Mem_layout::Caps_end), Pdir::Depth - 1,
              Mem_space_q_alloc(ram_quota(), Mapped_allocator::allocator()));
#endif
}
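// Read one row of the "protocol 1" material table at 'datapath' into this
// Material: name, mass, atoms_per_mol, and the per-nuclide composition keyed
// by the nuclide list referenced by the data set's "nucpath" attribute.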
void pyne::Material::_load_comp_protocol1(H5::H5File * db, std::string datapath, int row)
{
  H5::DataSet data_set = (*db).openDataSet(datapath);

  hsize_t data_offset[1] = {static_cast<hsize_t>(row)};
  if (row < 0)
  {
    // Handle negative row indices by counting from the end of the data set
    H5::DataSpace data_space = data_set.getSpace();
    hsize_t data_dims[1];
    data_space.getSimpleExtentDims(data_dims);
    // the unsigned conversion above wrapped the negative row; adding the
    // extent wraps it back to data_dims[0] + row
    data_offset[0] += data_dims[0];
  };

  // Grab the nucpath
  std::string nucpath;
  H5::Attribute nuc_attr = data_set.openAttribute("nucpath");
  hsize_t nuc_attr_len = nuc_attr.getStorageSize() / sizeof(char);
  H5::StrType nuc_attr_type(0, nuc_attr_len);
  nuc_attr.read(nuc_attr_type, nucpath);

  // Grab the nuclides
  std::vector<int> nuclides = h5wrap::h5_array_to_cpp_vector_1d<int>(db, nucpath, H5::PredType::NATIVE_INT);
  int nuc_size = nuclides.size();
  hsize_t nuc_dims[1] = {nuc_size};

  // Get the data hyperslab
  H5::DataSpace data_hyperslab = data_set.getSpace();
  hsize_t data_count[1] = {1};
  data_hyperslab.selectHyperslab(H5S_SELECT_SET, data_count, data_offset);

  // Get memory space for writing
  H5::DataSpace mem_space (1, data_count);

  // Get material type
  size_t material_struct_size = sizeof(pyne::material_struct) + sizeof(double)*(nuc_size);
  H5::CompType data_desc(material_struct_size);
  H5::ArrayType comp_values_array_type (H5::PredType::NATIVE_DOUBLE, 1, nuc_dims);

  // make the data table type
  data_desc.insertMember("name", HOFFSET(pyne::material_struct, name), H5::StrType(0, 20));
  data_desc.insertMember("mass", HOFFSET(pyne::material_struct, mass), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("atoms_per_mol", HOFFSET(pyne::material_struct, atoms_per_mol), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("comp", HOFFSET(pyne::material_struct, comp), comp_values_array_type);

  // make the data array; we have to over-allocate (see the layout sketch after this function)
  material_struct * mat_data = (material_struct *) malloc(material_struct_size);

  // Finally, get data and put in on this instance
  data_set.read(mat_data, data_desc, mem_space, data_hyperslab);

  name = std::string((*mat_data).name);
  mass = (*mat_data).mass;
  atoms_per_mol = (*mat_data).atoms_per_mol;
  for (int i = 0; i < nuc_size; i++)
    comp[nuclides[i]] = (double) (*mat_data).comp[i];

  free(mat_data);
};
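// The read above relies on 'comp' being a trailing array member of
// pyne::material_struct, which is why the record is malloc'ed larger than
// sizeof(material_struct).  The struct below is a hypothetical sketch of such
// a layout, for illustration only; the real pyne::material_struct is defined
// elsewhere in the pyne headers and may differ.
struct material_struct_sketch
{
  char   name[20];       // fixed-width, '\0'-padded name
  double mass;
  double atoms_per_mol;
  double comp[1];        // trailing array, over-allocated to hold nuc_size doubles
};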
typename Generic_obj_space<SPACE>::Entry *
Generic_obj_space<SPACE>::alien_lookup(Address index)
{
  Mem_space *ms = mem_space();

  Address phys = Address(ms->virt_to_phys((Address)cap_virt(index)));
  if (EXPECT_FALSE(phys == ~0UL))
    return 0;

  return reinterpret_cast<Entry*>(Mem_layout::phys_to_pmem(phys));
}
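// Read the capability stored at index 'virt' (masked to this space's index
// width); returns a null capability when the slot is not mapped.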
typename Generic_obj_space<SPACE>::Capability
Generic_obj_space<SPACE>::lookup(Address virt)
{
  Capability *c;
  virt &= ~(~0UL << Whole_space);

  if (mem_space() == Mem_space::current_mem_space(current_cpu()))
    c = reinterpret_cast<Capability*>(cap_virt(virt));
  else
    c = get_cap(virt);

  if (EXPECT_FALSE(!c))
    return Capability(0); // void

  return Mem_layout::read_special_safe(c);
}
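// Map object 'phys' at index 'virt' with rights 'page_attribs', allocating a
// capability page on demand.  Reports an identical existing mapping, a rights
// upgrade, a conflicting mapping, or an out-of-memory condition.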
typename Generic_obj_space<SPACE>::Status
Generic_obj_space<SPACE>::v_insert(Phys_addr phys, Addr const &virt, Size size,
                                   unsigned char page_attribs)
{
  (void)size;
  assert (size.value() == 1);

  Entry *c;

  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt.value());
      if (!c)
	return Insert_err_nomem;

      Capability cap;
      if (!Mem_layout::read_special_safe((Capability*)c, cap)
	  && !caps_alloc(virt.value()))
	return Insert_err_nomem;
    }
  else
    {
      c = alien_lookup(virt.value());
      if (!c && !(c = caps_alloc(virt.value())))
	return Insert_err_nomem;
      Obj::set_entry(virt, c);
    }

  if (c->valid())
    {
      if (c->obj() == phys)
	{
	  if (EXPECT_FALSE(c->rights() == page_attribs))
	    return Insert_warn_exists;

	  c->add_rights(page_attribs);
	  return Insert_warn_attrib_upgrade;
	}
      else
	return Insert_err_exists;
    }

  c->set(phys, page_attribs);
  return Insert_ok;
}
/*inline NEEDS["mapped_alloc.h", <cstring>, "ram_quota.h",
                     Generic_obj_space::cap_virt]*/
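// Allocate, zero, and map one page of capability entries covering index
// 'virt'; returns the entry for 'virt', or 0 if the quota charge or the
// mapping fails.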
typename Generic_obj_space<SPACE>::Entry *
Generic_obj_space<SPACE>::caps_alloc(Address virt)
{
  Address cv = (Address)cap_virt(virt);
  void *mem = Mapped_allocator::allocator()->q_unaligned_alloc(ram_quota(), Config::PAGE_SIZE);

  if (!mem)
    return 0;

  add_dbg_info(mem, this, virt);

  Mem::memset_mwords(mem, 0, Config::PAGE_SIZE / sizeof(Mword));

  Mem_space::Status s;
  s = mem_space()->v_insert(
      Mem_space::Phys_addr::create(Mem_space::kernel_space()->virt_to_phys((Address)mem)),
      Mem_space::Addr::create(cv).trunc(Mem_space::Size::create(Config::PAGE_SIZE)),
      Mem_space::Size::create(Config::PAGE_SIZE),
      Mem_space::Page_cacheable | Mem_space::Page_writable
      | Mem_space::Page_referenced | Mem_space::Page_dirty);

  switch (s)
    {
    case Insert_ok:
    case Insert_warn_exists:
    case Insert_warn_attrib_upgrade:
    case Insert_err_exists:
      break;
    case Insert_err_nomem:
      Mapped_allocator::allocator()->q_unaligned_free(ram_quota(),
          Config::PAGE_SIZE, mem);
      return 0;
    };

  unsigned long cap = (cv & (Config::PAGE_SIZE - 1)) | (unsigned long)mem;

  return reinterpret_cast<Entry*>(cap);
}
IMPLEMENTATION [io && (ia32 || amd64 || ux)]:

//
// Disassemble IO instructions to compute the port address and
// the number of ports accessed
//

/** Compute port number and access width for an IO instruction.
    @param eip address of the faulting instruction
    @param ts thread state with registers
    @param port return port address
    @param size return log2 of the number of ports accessed
      (0 = byte, 1 = word, 2 = dword; 16 marks the whole 64K IO space for cli/sti)
    @return true if the instruction was decoded successfully,
      false otherwise.
    A self-contained opcode-table sketch follows the implementation below.
*/
bool
Thread::get_ioport(Address eip, Trap_state *ts, unsigned *port, unsigned *size)
{
  int from_user = ts->cs() & 3;

  // handle 1 Byte IO
  switch (mem_space()->peek((Unsigned8*)eip, from_user))
    {
    case 0xec:			// in dx, al
    case 0x6c:			// insb
    case 0xee:			// out dx, al
    case 0x6e:			// outb
      *size = 0;
      *port = ts->dx() & 0xffff;
      return true;
    case 0xed:			// in dx, eax
    case 0x6d:			// insd
    case 0xef:			// out eax, dx
    case 0x6f:			// outd
      *size = 2;
      *port = ts->dx() & 0xffff;
      if (*port + 4 <= Mem_layout::Io_port_max)
	return true;
      else		   // Access beyond L4_IOPORT_MAX
	return false;
    case 0xfa:			// cli
    case 0xfb:			// sti
      *size = 16; /* 16bit IO space */
      *port = 0;
      return true;
    }

  // handle 2 Byte IO
  if (!(eip < Kmem::mem_user_max - 1))
    return false;

  switch (mem_space()->peek((Unsigned8 *)eip, from_user))
    {
    case 0xe4:			// in imm8, al
    case 0xe6:			// out al, imm8
      *size = 0;
      *port = mem_space()->peek((Unsigned8 *)(eip + 1), from_user);
      return true;
    case 0xe5:			// in imm8, eax
    case 0xe7:			// out eax, imm8
      *size = 2;
      *port = mem_space()->peek((Unsigned8 *)(eip + 1), from_user);
      return *port + 4 <= Mem_layout::Io_port_max;

    case 0x66:			// operand size override
      switch (mem_space()->peek((Unsigned8 *)(eip + 1), from_user))
	{
	case 0xed:			// in dx, ax
	case 0xef:			// out ax, dx
	case 0x6d:			// insw
	case 0x6f:			// outw
	  *size = 1;
	  *port = ts->dx() & 0xffff;
	  if (*port + 2 <= Mem_layout::Io_port_max)
	    return true;
	  else		   // Access beyond L4_IOPORT_MAX
	    return false;
	case 0xe5:			// in imm8, ax
	case 0xe7:			// out ax,imm8
	  *size = 1;
	  *port = mem_space()->peek((Unsigned8 *)(eip + 2), from_user);
	  if (*port + 2 <= Mem_layout::Io_port_max)
	    return true;
	  else
	    return false;
	}

    case 0xf3:			// REP
      switch (mem_space()->peek((Unsigned8*)(eip + 1), from_user))
	{
	case 0x6c:			// REP insb
	case 0x6e:			// REP outb
	  *size = 0;
	  *port = ts->dx() & 0xffff;
	  return true;
	case 0x6d:			// REP insd
	case 0x6f:			// REP outd
	  *size = 2;
	  *port = ts->dx() & 0xffff;
	  if (*port + 4 <= Mem_layout::Io_port_max)
	    return true;
	  else		   // Access beyond L4_IOPORT_MAX
	    return false;
	}
    }

  // handle 3 Byte IO
  if (!(eip < Kmem::mem_user_max - 2))
    return false;

  Unsigned16 w = mem_space()->peek((Unsigned16 *)eip, from_user);
  if (w == 0x66f3 || // sizeoverride REP
      w == 0xf366)   // REP sizeoverride
    {
      switch (mem_space()->peek((Unsigned8 *)(eip + 2), from_user))
	{
	case 0x6d:			// REP insw
	case 0x6f:			// REP outw
	  *size = 1;
	  *port = ts->dx() & 0xffff;
	  if (*port + 2 <= Mem_layout::Io_port_max)
	    return true;
	  else		   // Access beyond L4_IOPORT_MAX
	    return false;
	}
    }

  // nothing appropriate found
  return false;
}
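// Self-contained, hypothetical sketch of the one-byte opcode table used above
// ('decode_io_width' is not part of Fiasco): it maps the unprefixed
// IN/OUT/INS/OUTS opcodes to the same log2 access width that get_ioport
// reports, and -1 for anything else.
static inline int decode_io_width(unsigned char op)
{
  switch (op)
    {
    case 0xec: case 0xee: case 0x6c: case 0x6e:   // in/out/ins/outs, byte
      return 0;
    case 0xed: case 0xef: case 0x6d: case 0x6f:   // in/out/ins/outs, dword
      return 2;
    default:                                      // not a one-byte IO opcode
      return -1;
    }
}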
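// Write this Material as one row of an HDF5 table at 'datapath' in 'filename',
// creating the file, the nuclide list at 'nucpath', and a chunked, extendable
// data set as needed.  'row' selects the row to overwrite; negative values
// count from the end, and -0.0 appends a new row.  A hedged usage sketch
// follows the function.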
void pyne::Material::write_hdf5(std::string filename, std::string datapath, std::string nucpath, float row, int chunksize)
{
  // Turn off annoying HDF5 errors
  H5::Exception::dontPrint();

  // Create new/open datafile.
  H5::H5File db;
  if (pyne::file_exists(filename))
  {
    bool isH5 = H5::H5File::isHdf5(filename);
    if (!isH5)
      throw h5wrap::FileNotHDF5(filename);
    db = H5::H5File(filename, H5F_ACC_RDWR);
  }
  else
    db = H5::H5File(filename, H5F_ACC_TRUNC);

  //
  // Read in nuclist if available, write it out if not
  //
  bool nucpath_exists = h5wrap::path_exists(&db, nucpath);
  std::vector<int> nuclides;
  int nuc_size;
  hsize_t nuc_dims[1];
  
  if (nucpath_exists)
  {
    nuclides = h5wrap::h5_array_to_cpp_vector_1d<int>(&db, nucpath, H5::PredType::NATIVE_INT);
    nuc_size = nuclides.size();
    nuc_dims[0] = nuc_size;
  }
  else
  {
    nuclides = std::vector<int>();
    for (pyne::comp_iter i = comp.begin(); i != comp.end(); i++)
      nuclides.push_back(i->first);
    nuc_size = nuclides.size();

    // Create the data if it doesn't exist
    int nuc_data [nuc_size];
    for (int n = 0; n != nuc_size; n++)
      nuc_data[n] = nuclides[n];
    nuc_dims[0] = nuc_size;
    H5::DataSpace nuc_space(1, nuc_dims);
    H5::DataSet nuc_set = db.createDataSet(nucpath, H5::PredType::NATIVE_INT, nuc_space);
    nuc_set.write(nuc_data, H5::PredType::NATIVE_INT);
    db.flush(H5F_SCOPE_GLOBAL);
  };



  //
  // Write out to the file
  //
  H5::DataSet data_set;
  H5::DataSpace data_space, data_hyperslab;
  int data_rank = 1;
  hsize_t data_dims[1] = {1};
  hsize_t data_max_dims[1] = {H5S_UNLIMITED};
  hsize_t data_offset[1] = {0};

  size_t material_struct_size = sizeof(pyne::material_struct) + sizeof(double)*(nuc_size);
  H5::CompType data_desc(material_struct_size);
  H5::ArrayType comp_values_array_type (H5::PredType::NATIVE_DOUBLE, 1, nuc_dims);

  // make the data table type
  data_desc.insertMember("name", HOFFSET(pyne::material_struct, name), H5::StrType(0, 20));
  data_desc.insertMember("mass", HOFFSET(pyne::material_struct, mass), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("atoms_per_mol", HOFFSET(pyne::material_struct, atoms_per_mol), H5::PredType::NATIVE_DOUBLE);
  data_desc.insertMember("comp", HOFFSET(pyne::material_struct, comp), comp_values_array_type);

  // make the data array, have to over-allocate
  material_struct * mat_data  = (material_struct *) malloc(material_struct_size);
  int name_len = name.length();
  for (int i=0; i < 20; i++)
  {
    if (i < name_len)
      (*mat_data).name[i] = name[i];
    else
      (*mat_data).name[i] = '\0';
  };
  (*mat_data).mass = mass;
  (*mat_data).atoms_per_mol = atoms_per_mol;
  for (int n = 0; n != nuc_size; n++)
  {
    if (0 < comp.count(nuclides[n]))
      (*mat_data).comp[n] = comp[nuclides[n]];
    else
      (*mat_data).comp[n] = 0.0;
  };

  // get / make the data set
  bool datapath_exists = h5wrap::path_exists(&db, datapath);
  if (datapath_exists)
  {
    data_set = db.openDataSet(datapath);
    data_space = data_set.getSpace();
    data_rank = data_space.getSimpleExtentDims(data_dims, data_max_dims);

    // Determine the row size.
    int row_num = (int) row;

    if (std::signbit(row))
      row_num = data_dims[0] + row;  // careful, row is negative

    if (row_num < 0)
      // still negative after adjusting from the end: no such row
      throw h5wrap::HDF5BoundsError();
    else if (data_dims[0] <= (hsize_t) row_num)
    {
      // row == -0, extend the data set so that we can append, or
      // row_num is larger than the current dimension, resize to accommodate.
      data_dims[0] = row_num + 1;
      data_set.extend(data_dims);
    }

    data_offset[0] = row_num;
  }
  else
  {
    // Get full space
    data_space = H5::DataSpace(1, data_dims, data_max_dims);

    // Make data set properties to enable chunking
    H5::DSetCreatPropList data_set_params;
    hsize_t chunk_dims[1] = {chunksize};
    data_set_params.setChunk(1, chunk_dims);

    material_struct * data_fill_value  = (material_struct *) malloc(material_struct_size);
    for (int i=0; i < 20; i++)
      (*data_fill_value).name[i] = '\0';
    (*data_fill_value).mass = -1.0;
    (*data_fill_value).atoms_per_mol = -1.0;
    for (int n = 0; n != nuc_size; n++)
      (*data_fill_value).comp[n] = 0.0;
    data_set_params.setFillValue(data_desc, data_fill_value);

    // Create the data set
    data_set = db.createDataSet(datapath, data_desc, data_space, data_set_params);
    data_set.extend(data_dims);

    // Add attribute pointing to nuc path
    H5::StrType nuc_attr_type(0, nucpath.length());
    H5::DataSpace nuc_attr_space(H5S_SCALAR);
    H5::Attribute nuc_attr = data_set.createAttribute("nucpath", nuc_attr_type, nuc_attr_space);
    nuc_attr.write(nuc_attr_type, nucpath);

    // Remember to de-allocate
    free(data_fill_value);
  };

  // Get the data hyperslab
  data_hyperslab = data_set.getSpace();
  hsize_t data_count[1] = {1};
  data_hyperslab.selectHyperslab(H5S_SELECT_SET, data_count, data_offset);

  // Get a memory space for writing
  H5::DataSpace mem_space (1, data_count, data_max_dims);

  // Write the row...
  data_set.write(mat_data, data_desc, mem_space, data_hyperslab);

  // Close out the HDF5 file
  db.close();

  // Remember the milk!  
  // ...by which I mean to deallocate
  free(mat_data);
};
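// Hedged usage sketch; illustrative only.  The file and path names are made
// up and 'append_material' is not part of pyne.  A row of -0.0 hits the
// "row == -0" branch above, i.e. the material is appended as a new row at the
// end of the table.
void append_material(pyne::Material &mat)
{
  mat.write_hdf5("materials.h5", "/material", "/nucid", -0.0, 100);
}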
Ram_quota *
Generic_obj_space<SPACE>::ram_quota() const
{ return mem_space()->ram_quota(); }