Example No. 1
static void
mem_delete_command (char *args, int from_tty)
{
  int num;
  struct get_number_or_range_state state;

  require_user_regions (from_tty);

  target_dcache_invalidate ();

  if (args == NULL || *args == '\0')
    {
      if (query (_("Delete all memory regions? ")))
	mem_clear ();
      dont_repeat ();
      return;
    }

  init_number_or_range (&state, args);
  while (!state.finished)
    {
      num = get_number_or_range (&state);
      mem_delete (num);
    }

  dont_repeat ();
}
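A usage note: this handler comes from gdb/memattr.c, where it is bound to GDB's delete mem command; the argument string is parsed as a space-separated list of region numbers and ranges. Sample invocations (session output assumed for illustration):

(gdb) delete mem 1
(gdb) delete mem 2 4-6
(gdb) delete mem
Delete all memory regions? (y or n)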
Example No. 2
void zlib_compress_config_t::reset()
{
    // wipe the whole object, then let each option restore its default below
    mem_clear(this, sizeof(*this));

    mem_level.reset();
    window_bits.reset();
    strategy.reset();
}
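The two-argument mem_clear() used above is not shown on this page; in most codebases it is a thin memset() wrapper. A minimal sketch, assuming that convention (not the project's actual definition):

#include <string.h>

/* hypothetical helper: zero a buffer of n bytes */
static void mem_clear(void *p, size_t n)
{
    if (p != NULL && n > 0)
        memset(p, 0, n);
}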
Example No. 3
void ed25519_Blinding_Finish(
    void *context)                      /* IN: blinding context */
{
    if (context)
    {
        mem_clear (context, sizeof(EDP_BLINDING_CTX));
        mem_free (context);
    }
}
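Clearing before freeing matters here because the blinding context holds secret material; releasing it unscrubbed would leave key-dependent data in the allocator's free list. Note that a plain memset() immediately before free() can legally be optimized away as a dead store, which is why hardened code often uses a non-elidable variant. A minimal sketch of that pattern (explicit_bzero() is a glibc/BSD extension, assumed available):

#include <stdlib.h>
#include <string.h>

/* hypothetical scrub-and-free helper */
static void secure_free(void *p, size_t n)
{
    if (p != NULL)
    {
        explicit_bzero(p, n);   /* unlike memset(), cannot be elided */
        free(p);
    }
}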
Example No. 4
/* -- Blinding -------------------------------------------------------------
//
//  Blinding is a measure to protect against side channel attacks. 
//  Blinding randomizes the scalar multiplier.
//
//  Instead of calculating a*P, calculate (a+b mod BPO)*P + B
//
//  Where b = random blinding and B = -b*P
//
// -------------------------------------------------------------------------
*/
void *ed25519_Blinding_Init(
    void *context,                      /* IO: null or ptr blinding context */
    const unsigned char *seed,          /* IN: [size bytes] random blinding seed */
    size_t size)                        /* IN: size of blinding seed */
{
    struct {
        Ext_POINT T;
        U_WORD t[K_WORDS];
        SHA512_CTX H;
        U8 digest[SHA512_DIGEST_LENGTH];
    } d;

    EDP_BLINDING_CTX *ctx = (EDP_BLINDING_CTX*)context;

    if (ctx == 0)
    {
        ctx = (EDP_BLINDING_CTX*)mem_alloc(sizeof(EDP_BLINDING_CTX));
        if (ctx == 0) return 0;
    }

    /* Use edp_custom_blinding to protect generation of the new blinder */

    SHA512_Init(&d.H);
    SHA512_Update(&d.H, edp_custom_blinding.zr, 32);
    SHA512_Update(&d.H, seed, size);
    SHA512_Final(d.digest, &d.H);

    ecp_BytesToWords(ctx->zr, d.digest+32);
    ecp_BytesToWords(d.t, d.digest);
    eco_Mod(d.t);
    ecp_Sub(ctx->bl, _w_BPO, d.t);

    eco_AddReduce(d.t, d.t, edp_custom_blinding.bl);
    edp_BasePointMult(&d.T, d.t, edp_custom_blinding.zr);
    edp_AddPoint(&d.T, &d.T, &edp_custom_blinding.BP);

    edp_ExtPoint2PE(&ctx->BP, &d.T);

    /* clear potentially sensitive data */
    mem_clear (&d, sizeof(d));

    return ctx;
}
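Taken together with Example No. 3, a typical caller creates the context from a fresh random seed, uses it for blinded scalar multiplications, then destroys it. A minimal usage sketch (get_random_bytes() is a hypothetical CSPRNG stand-in, not part of this library):

unsigned char seed[64];
void *blinding;

get_random_bytes(seed, sizeof(seed));   /* hypothetical entropy source */
blinding = ed25519_Blinding_Init(NULL, seed, sizeof(seed));
if (blinding != NULL)
{
    /* ... perform blinded signing/scalar operations here ... */
    ed25519_Blinding_Finish(blinding);  /* scrubs the secrets, then frees */
}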
Example No. 5
/*
 * The wizard completes the fellowship by waiting
 * for the other members to join, then releasing
 * them from the barrier.  The asymmetry between
 * the wizard and the other threads prevents a deadlock
 * that could otherwise occur from all of the fellowship members
 * waiting on each other's CVs.
 */
static void
wizard(void *p, unsigned long which)
{
  (void)p;
  
  const char *names[FOTR_SIZE];
  names[0] = nameof_istari(which);
  int name_idx = 1;
  
  // only one Wizard is allowed to be in this 'warlock'
  // critical section at a time
  lock_acquire(fotr.warlock);
  for (member *m = &fotr.men[0]; m <= &fotr.hobbits[3]; m++)
  {
    lock_acquire(m->m_lock);
    while (m->m_name == NULL)
      cv_wait(m->m_cv, m->m_lock);
    names[name_idx] = m->m_name;
    name_idx++;
    lock_release(m->m_lock);
  }
  fotr.generation++;
  
  lock_acquire(print_lock);
  kprintf("FELLOWSHIP:\t%s, %s, %s, %s, %s, %s, %s, %s, %s\n",
          names[0], names[1], names[2], names[3], names[4],
          names[5], names[6], names[7], names[8]);
  lock_release(print_lock);
  
  // The wizard now clears the way for the next fellowship
  for (member *m = &fotr.men[0]; m <= &fotr.hobbits[3]; m++)
    mem_clear(m);
  lock_release(fotr.warlock);
  
  leave(nameof_istari(which));
  V(done_sem);
}
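The member type is not defined in this excerpt; from the accesses above it evidently bundles a lock, a condition variable, and a name slot, and the project's one-argument mem_clear() resets a member between fellowships. A hypothetical layout consistent with the code (OS/161-style primitives; both definitions are assumptions):

typedef struct {
    struct lock *m_lock;   /* guards m_name */
    struct cv   *m_cv;     /* signalled when m_name is filled in */
    const char  *m_name;   /* NULL until the member joins */
} member;

/* hypothetical one-argument mem_clear: reopen the slot for the next generation */
static void mem_clear(member *m)
{
    m->m_name = NULL;      /* caller is still inside the fotr.warlock critical section */
}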
Example No. 6
/**
* Set up PML4 pages to enter Long Mode
* @param amount - amount of memory to map
*/
static void setup_pages(uint64 amount){
	uint64 p;
	uint64 t;
	uint64 d;
	uint64 dr;
	uint64 ptr;
	
	// Single page (PML1 entry) holds 4KB of RAM
	uint64 page_count = amount / PAGE_SIZE;
	if (amount % PAGE_SIZE > 0){
		page_count ++;
	}
	// Single table (PML2 entry) holds 2MB of RAM
	uint64 table_count = page_count / 512;
	if (page_count % 512 > 0){
		table_count ++;
	}
	// Single directory (PML3 entry, directory table pointer) holds 1GB of RAM
	uint64 directory_count = table_count / 512;
	if (table_count % 512 > 0){
		directory_count ++;
	}
	// Single drawer (PML4 entry) holds 512GB of RAM
	uint64 drawer_count = directory_count / 512;
	if (directory_count % 512 > 0){
		drawer_count ++;
	}
	
	// Position the page table structures in memory

	// Located at 0x00100000 (1MB mark, see config.h)
	// a.k.a. PML4T (512GB per entry = 256TB total, this is a page cabinet)
	// Holds 512 entries, only 1st is active - enough to map 512GB
	pm_t *pml4 = (pm_t*)PT_LOC; 
	// Located at PML4 + (8 * 512)
	// a.k.a. PDPT (page directory pointer table, 1GB per entry, let's call this a page drawer)
	// Holds 512 entries, each entry maps up to 1GB, table = 512GB
	pm_t *pml3 = (pm_t*)(((uint32)pml4) + (sizeof(pm_t) * 512));
	// Located at PML3 + (8 * 512 * drawer_count)
	// a.k.a. PD (page directory, 2MB per entry)
	// Holds 512 entries * directory_count, each entry maps up to 2MB, table = 1GB
	pm_t *pml2 = (pm_t*)(((uint32)pml3) + (sizeof(pm_t) * 512 * (uint32)drawer_count));
	// Located at PML2 + (8 * 512 * directory_count)
	// a.k.a. PT (page table, 4KB per entry)
	// Holds 512 entries * table_count, each entry maps 4KB, table = 2MB
	pm_t *pml1 = (pm_t*)(((uint32)pml2) + (sizeof(pm_t) * 512 * (uint32)directory_count));
	
	// Clear memory region where the page tables will reside
	mem_clear((uint8 *)pml4, sizeof(pm_t) * 512);
	mem_clear((uint8 *)pml3, sizeof(pm_t) * 512 * drawer_count);
	mem_clear((uint8 *)pml2, sizeof(pm_t) * 512 * directory_count);
	mem_clear((uint8 *)pml1, sizeof(pm_t) * 512 * table_count);

	// Set up pages, tables, directories and drawers in the cabinet :)
	for (p = 0; p < page_count; p ++){
		ptr = (uint64)(p * PAGE_SIZE);
		pml1[p].raw = ptr & PAGE_MASK;
		pml1[p].s.present = 1;
		pml1[p].s.writable = 1;
		pml1[p].s.write_through = 1;
		//pml1[p].s.cache_disable = 1;
		//pml1[p].s.global = 1;
	}
	for (t = 0; t < table_count; t ++){
		ptr = (uint64)(((uint32)pml1) + (sizeof(pm_t) * 512 * t));
		pml2[t].raw = ptr & PAGE_MASK;
		pml2[t].s.present = 1;
		pml2[t].s.writable = 1;
		pml2[t].s.write_through = 1;
		//pml2[t].s.cache_disable = 1;
	}
	for (d = 0; d < directory_count; d ++){
		ptr = (uint64)(((uint32)pml2) + (sizeof(pm_t) * 512 * d));
		pml3[d].raw = ptr & PAGE_MASK;
		pml3[d].s.present = 1;
		pml3[d].s.writable = 1;
		pml3[d].s.write_through = 1;
		//pml3[d].s.cache_disable = 1;
	}
	for (dr = 0; dr < drawer_count; dr ++){
		ptr = (uint64)(((uint32)pml3) + (sizeof(pm_t) * 512 * dr));
		pml4[dr].raw = ptr & PAGE_MASK;
		pml4[dr].s.present = 1;
		pml4[dr].s.writable = 1;
		pml4[dr].s.write_through = 1;
		//pml4[dr].s.cache_disable = 1;
	}

	// Set PML4 pointer address
	pml4_ptr32 = (uint32)pml4; // Point to our cabinet :)
}
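As a concrete check of the ceiling divisions above, mapping 64 MB of RAM works out as follows (worked numbers for illustration, PAGE_SIZE assumed to be 4096):

/* amount = 64 MB = 67108864 bytes */
page_count      = 16384;  /* 67108864 / 4096, no remainder            */
table_count     = 32;     /* 16384 / 512, no remainder (32 x 2MB)     */
directory_count = 1;      /* 32 / 512 = 0 r 32, rounds up to 1 (1GB)  */
drawer_count    = 1;      /* 1 / 512 = 0 r 1, rounds up to 1 (512GB)  */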
Example No. 7
File: main.c Project: calint/a
static inline void file_clear(file *o, color colr){
	mem_clear(o->address.i, o->size_in_bytes.i, colr.i);
}
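Unlike the zeroing helpers in the earlier examples, this project's mem_clear() takes a fill value (a color), making it behave like a word-wise memset. A hypothetical definition consistent with the call site (names and types are assumptions; the int-to-pointer cast presumes the project's 32-bit freestanding environment):

/* hypothetical: fill a region with a 32-bit color pattern */
static void mem_clear(int address, int size_in_bytes, int color)
{
    int *p = (int *)address;
    int n = size_in_bytes / (int)sizeof(int);
    while (n-- > 0)
        *p++ = color;   /* paint one 32-bit cell at a time */
}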
Example No. 8
static void
mem_command (char *args, int from_tty)
{
  CORE_ADDR lo, hi;
  char *tok;
  struct mem_attrib attrib;

  if (!args)
    error_no_arg (_("No mem"));

  /* For "mem auto", switch back to using a target provided list.  */
  if (strcmp (args, "auto") == 0)
    {
      if (mem_use_target)
	return;

      if (mem_region_list != target_mem_region_list)
	{
	  mem_clear ();
	  mem_region_list = target_mem_region_list;
	}

      mem_use_target = 1;
      return;
    }

  require_user_regions (from_tty);

  tok = strtok (args, " \t");
  if (!tok)
    error (_("no lo address"));
  lo = parse_and_eval_address (tok);

  tok = strtok (NULL, " \t");
  if (!tok)
    error (_("no hi address"));
  hi = parse_and_eval_address (tok);

  attrib = default_mem_attrib;
  while ((tok = strtok (NULL, " \t")) != NULL)
    {
      if (strcmp (tok, "rw") == 0)
	attrib.mode = MEM_RW;
      else if (strcmp (tok, "ro") == 0)
	attrib.mode = MEM_RO;
      else if (strcmp (tok, "wo") == 0)
	attrib.mode = MEM_WO;

      else if (strcmp (tok, "8") == 0)
	attrib.width = MEM_WIDTH_8;
      else if (strcmp (tok, "16") == 0)
	{
	  if ((lo % 2 != 0) || (hi % 2 != 0))
	    error (_("region bounds not 16 bit aligned"));
	  attrib.width = MEM_WIDTH_16;
	}
      else if (strcmp (tok, "32") == 0)
	{
	  if ((lo % 4 != 0) || (hi % 4 != 0))
	    error (_("region bounds not 32 bit aligned"));
	  attrib.width = MEM_WIDTH_32;
	}
      else if (strcmp (tok, "64") == 0)
	{
	  if ((lo % 8 != 0) || (hi % 8 != 0))
	    error (_("region bounds not 64 bit aligned"));
	  attrib.width = MEM_WIDTH_64;
	}

#if 0
      else if (strcmp (tok, "hwbreak") == 0)
	attrib.hwbreak = 1;
      else if (strcmp (tok, "swbreak") == 0)
	attrib.hwbreak = 0;
#endif

      else if (strcmp (tok, "cache") == 0)
	attrib.cache = 1;
      else if (strcmp (tok, "nocache") == 0)
	attrib.cache = 0;

#if 0
      else if (strcmp (tok, "verify") == 0)
	attrib.verify = 1;
      else if (strcmp (tok, "noverify") == 0)
	attrib.verify = 0;
#endif

      else
	error (_("unknown attribute: %s"), tok);
    }

  create_mem_region (lo, hi, &attrib);
}
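For reference, this is the handler behind GDB's mem command; sample invocations that exercise the parsing above (addresses chosen for illustration, and 4-byte aligned so the 32-bit width check passes):

(gdb) mem 0x1000 0x2000 ro 32 cache
(gdb) mem 0x3000 0x4000 rw nocache
(gdb) mem auto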