Example 1
/** Create new frame zone.
 *
 * @param zone     Zone to construct.
 * @param start    Physical address of the first frame within the zone.
 * @param count    Count of frames in zone.
 * @param flags    Zone flags.
 * @param confdata Configuration data of the zone.
 *
 */
NO_TRACE static void zone_construct(zone_t *zone, pfn_t start, size_t count,
    zone_flags_t flags, void *confdata)
{
	zone->base = start;
	zone->count = count;
	zone->flags = flags;
	zone->free_count = count;
	zone->busy_count = 0;
	
	if (flags & ZONE_AVAILABLE) {
		/*
		 * Initialize frame bitmap (located after the array of
		 * frame_t structures in the configuration space).
		 */
		
		bitmap_initialize(&zone->bitmap, count, confdata +
		    (sizeof(frame_t) * count));
		bitmap_clear_range(&zone->bitmap, 0, count);
		
		/*
		 * Initialize the array of frame_t structures.
		 */
		
		zone->frames = (frame_t *) confdata;
		
		for (size_t i = 0; i < count; i++)
			frame_initialize(&zone->frames[i]);
	} else {
		bitmap_initialize(&zone->bitmap, 0, NULL);
		zone->frames = NULL;
	}
}
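The configuration area layout implied by the comments above (an array of frame_t structures followed by the frame bitmap) dictates how much memory a caller must reserve. A minimal sketch of the sizing rule, assuming bitmap_size() returns the number of bytes backing a bitmap of the given element count (it is used that way in Examples 2 and 6):

/* Sketch: size of the confdata block that zone_construct() expects. */
static size_t zone_confdata_size(size_t count)
{
	/* frame_t array first, then the frame bitmap right behind it. */
	return sizeof(frame_t) * count + bitmap_size(count);
}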
Example 2
/** Enable I/O space range for task.
 *
 * The caller must ensure that interrupts are disabled and that the
 * task is locked.
 *
 * @param task   Task.
 * @param ioaddr Starting I/O space address.
 * @param size   Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 *
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
	size_t elements = ioaddr + size;
	if (elements > IO_PORTS)
		return ENOENT;
	
	if (task->arch.iomap.elements < elements) {
		/*
		 * The I/O permission bitmap is too small and needs to be grown.
		 */
		
		void *store = malloc(bitmap_size(elements), FRAME_ATOMIC);
		if (!store)
			return ENOMEM;
		
		bitmap_t oldiomap;
		bitmap_initialize(&oldiomap, task->arch.iomap.elements,
		    task->arch.iomap.bits);
		
		bitmap_initialize(&task->arch.iomap, elements, store);
		
		/*
		 * Mark the new range inaccessible.
		 */
		bitmap_set_range(&task->arch.iomap, oldiomap.elements,
		    elements - oldiomap.elements);
		
		/*
		 * If a smaller iomap already existed, copy its
		 * contents and deallocate it.
		 */
		if (oldiomap.bits) {
			bitmap_copy(&task->arch.iomap, &oldiomap,
			    oldiomap.elements);
			
			free(oldiomap.bits);
		}
	}
	
	/*
	 * Enable the range and we are done.
	 */
	bitmap_clear_range(&task->arch.iomap, (size_t) ioaddr, size);
	
	/*
	 * Increment I/O Permission bitmap generation counter.
	 */
	task->arch.iomapver++;
	
	return EOK;
}
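The grow path above follows a reusable pattern: allocate a larger store, rebind the bitmap, mark the newly added tail, then copy the old contents and free the old backing store. A condensed sketch, under the assumption that bitmap_t is a plain struct holding just the element count and the bits pointer (as the field accesses above suggest):

/* Sketch: grow a bitmap_t while preserving its contents. */
static int bitmap_grow(bitmap_t *bitmap, size_t new_elements)
{
	void *store = malloc(bitmap_size(new_elements), FRAME_ATOMIC);
	if (!store)
		return ENOMEM;
	
	bitmap_t old = *bitmap;
	bitmap_initialize(bitmap, new_elements, store);
	
	/* The new tail starts fully set (ports disabled by default). */
	bitmap_set_range(bitmap, old.elements, new_elements - old.elements);
	
	if (old.bits) {
		bitmap_copy(bitmap, &old, old.elements);
		free(old.bits);
	}
	
	return EOK;
}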
Example 3
/* Allocate and initialize data needed for global pseudo live
   analysis.  */
static void
initiate_live_solver (void)
{
  bitmap_initialize (&all_hard_regs_bitmap, &reg_obstack);
  bitmap_set_range (&all_hard_regs_bitmap, 0, FIRST_PSEUDO_REGISTER);
  bb_data = XNEWVEC (struct bb_data_pseudos, last_basic_block_for_fn (cfun));
  bitmap_initialize (&all_blocks, &reg_obstack);

  basic_block bb;
  FOR_ALL_BB_FN (bb, cfun)
    {
      bb_data_t bb_info = get_bb_data (bb);
      bb_info->bb = bb;
      bitmap_initialize (&bb_info->killed_pseudos, &reg_obstack);
      bitmap_initialize (&bb_info->gen_pseudos, &reg_obstack);
      bitmap_set_bit (&all_blocks, bb->index);
    }
}
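For contrast with the HelenOS API above, GCC's bitmap_head is bound to an obstack at initialization and its elements are released with bitmap_clear(). A minimal usage sketch of the calls seen in this and the later GCC examples:

/* Sketch: the GCC bitmap_head life cycle used in these examples.  */
static void
bitmap_usage_sketch (void)
{
  bitmap_head regs;

  bitmap_initialize (&regs, &reg_obstack);	/* bind to an obstack */
  bitmap_set_range (&regs, 0, FIRST_PSEUDO_REGISTER);
  if (bitmap_bit_p (&regs, 0))			/* membership test */
    bitmap_clear_bit (&regs, 0);
  bitmap_clear (&regs);				/* free the elements */
}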
Example 4
/** Install I/O Permission bitmap.
 *
 * Current task's I/O permission bitmap, if any, is installed
 * in the current CPU's TSS.
 *
 * Interrupts must be disabled prior to this call.
 *
 */
void io_perm_bitmap_install(void)
{
    /* First, copy the I/O Permission Bitmap. */
    irq_spinlock_lock(&TASK->lock, false);

    size_t ver = TASK->arch.iomapver;
    size_t elements = TASK->arch.iomap.elements;

    if (elements > 0) {
        ASSERT(TASK->arch.iomap.bits);

        bitmap_t iomap;
        bitmap_initialize(&iomap, TSS_IOMAP_SIZE * 8,
                          CPU->arch.tss->iomap);
        bitmap_copy(&iomap, &TASK->arch.iomap, elements);

        /*
         * Set the trailing bits in the last byte of the map to disable
         * I/O access.
         */
        bitmap_set_range(&iomap, elements,
                         ALIGN_UP(elements, 8) - elements);

        /*
         * It is safe to set the trailing eight bits because of the
         * extra convenience byte in TSS_IOMAP_SIZE.
         */
        bitmap_set_range(&iomap, ALIGN_UP(elements, 8), 8);
    }

    irq_spinlock_unlock(&TASK->lock, false);

    /*
     * Second, adjust TSS segment limit.
     * Take the extra ending byte with all bits set into account.
     */
    ptr_16_64_t cpugdtr;
    gdtr_store(&cpugdtr);

    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
    size_t size = bitmap_size(elements);
    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + size);
    gdtr_load(&cpugdtr);

    /*
     * Before the task register is reloaded, the TSS descriptor type
     * must be changed back to an available (non-busy) TSS, as loading
     * a busy TSS descriptor raises a general protection fault.
     */
    tss_descriptor_t *tss_desc = (tss_descriptor_t *) &gdt_p[TSS_DES];
    tss_desc->type = AR_TSS;
    tr_load(GDT_SELECTOR(TSS_DES));

    /*
     * Update the generation count so that faults caused by
     * early accesses can be serviced.
     */
    CPU->arch.iomapver_copy = ver;
}
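The two bitmap_set_range() calls above rely on byte-granular arithmetic in the I/O map. A worked illustration with hypothetical values, not taken from the sources:

/*
 * For a map of 13 ports:
 *
 *   elements             = 13
 *   ALIGN_UP(13, 8) - 13 = 3   -> bits 13..15 pad the last used byte
 *   ALIGN_UP(13, 8)      = 16  -> eight more set bits fill the extra
 *                                 terminating byte within TSS_IOMAP_SIZE
 *
 * Every port number >= 13 therefore reads as "access denied".
 */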
Example 5
/* Test driver for a custom bitmap module; its interface is sketched below. */
#include "bitmap.h"	/* hypothetical header for struct Bitmap and friends */

int main(void)
{
	struct Bitmap bitmap;

	bitmap_initialize(&bitmap);
	bitmap_setatindex(-1, &bitmap);		/* out-of-range index, should be rejected */
	bitmap_setatindex(7, &bitmap);
	bitmap_setatindex(7, &bitmap);		/* setting an already-set bit */
	bitmap_removeatindex(7, &bitmap);
	bitmap_setatindex(7, &bitmap);

	int block_size = 400;
	int index = bitmap_findemptyblockofsize(block_size, &bitmap);
	bitmap_setblockofsize(block_size, &bitmap);
	_bitmap_print_raw(&bitmap);

	bitmap_removeblockofsize(index, block_size, &bitmap);
	_bitmap_print_raw(&bitmap);

	return 0;
}
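The declarations behind this driver are not shown anywhere in the section. The following sketch of a bitmap.h is purely hypothetical: every name and the capacity are inferred from the calls in main():

/* bitmap.h -- hypothetical interface inferred from the test driver. */
#include <stddef.h>

#define BITMAP_BITS 4096

struct Bitmap {
	unsigned char data[BITMAP_BITS / 8];
};

void bitmap_initialize(struct Bitmap *bitmap);
int bitmap_setatindex(int index, struct Bitmap *bitmap);	/* rejects index < 0 */
int bitmap_removeatindex(int index, struct Bitmap *bitmap);
int bitmap_findemptyblockofsize(int size, struct Bitmap *bitmap);
int bitmap_setblockofsize(int size, struct Bitmap *bitmap);
int bitmap_removeblockofsize(int index, int size, struct Bitmap *bitmap);
void _bitmap_print_raw(struct Bitmap *bitmap);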
Example 6
/** Enable I/O space range for task.
 *
 * The caller must ensure that interrupts are disabled and that the
 * task is locked.
 *
 * @param task	 Task.
 * @param ioaddr Starting I/O space address.
 * @param size	 Size of the enabled I/O range.
 *
 * @return EOK on success or an error code from errno.h.
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
    if (!task->arch.iomap) {
        task->arch.iomap = malloc(sizeof(bitmap_t), 0);
        if (task->arch.iomap == NULL)
            return ENOMEM;

        void *store = malloc(bitmap_size(IO_MEMMAP_PAGES), 0);
        if (store == NULL) {
            /* Avoid leaking the just-allocated bitmap_t. */
            free(task->arch.iomap);
            task->arch.iomap = NULL;
            return ENOMEM;
        }

        bitmap_initialize(task->arch.iomap, IO_MEMMAP_PAGES, store);
        bitmap_clear_range(task->arch.iomap, 0, IO_MEMMAP_PAGES);
    }

    uintptr_t iopage = ioaddr / PORTS_PER_PAGE;
    size = ALIGN_UP(size + ioaddr - 4 * iopage, PORTS_PER_PAGE);
    bitmap_set_range(task->arch.iomap, iopage, size / 4);

    return EOK;
}
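The rounding arithmetic above widens the request to whole I/O pages. A worked example with hypothetical values, assuming PORTS_PER_PAGE == 4:

/*
 * ioaddr = 1017, size = 7:
 *
 *   iopage = 1017 / 4                         = 254
 *   size   = ALIGN_UP(7 + 1017 - 4 * 254, 4)  = ALIGN_UP(8, 4) = 8
 *   pages  = 8 / 4                            = 2
 *
 * Pages 254 and 255 are enabled, covering the whole requested range.
 */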
Example 7
/** Merge two zones.
 *
 * Assume z1 & z2 are locked and compatible and zones lock is
 * locked.
 *
 * @param z1       First zone to merge.
 * @param z2       Second zone to merge.
 * @param old_z1   Original data of the first zone.
 * @param confdata Merged zone configuration data.
 *
 */
NO_TRACE static void zone_merge_internal(size_t z1, size_t z2, zone_t *old_z1,
    void *confdata)
{
	ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
	ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
	ASSERT(zones.info[z1].flags == zones.info[z2].flags);
	ASSERT(zones.info[z1].base < zones.info[z2].base);
	ASSERT(!overlaps(zones.info[z1].base, zones.info[z1].count,
	    zones.info[z2].base, zones.info[z2].count));
	
	/* Difference between zone bases */
	pfn_t base_diff = zones.info[z2].base - zones.info[z1].base;
	
	zones.info[z1].count = base_diff + zones.info[z2].count;
	zones.info[z1].free_count += zones.info[z2].free_count;
	zones.info[z1].busy_count += zones.info[z2].busy_count;
	
	bitmap_initialize(&zones.info[z1].bitmap, zones.info[z1].count,
	    confdata + (sizeof(frame_t) * zones.info[z1].count));
	bitmap_clear_range(&zones.info[z1].bitmap, 0, zones.info[z1].count);
	
	zones.info[z1].frames = (frame_t *) confdata;
	
	/*
	 * Copy frames and bits from both zones to preserve parents, etc.
	 */
	
	for (size_t i = 0; i < old_z1->count; i++) {
		bitmap_set(&zones.info[z1].bitmap, i,
		    bitmap_get(&old_z1->bitmap, i));
		zones.info[z1].frames[i] = old_z1->frames[i];
	}
	
	for (size_t i = 0; i < zones.info[z2].count; i++) {
		bitmap_set(&zones.info[z1].bitmap, base_diff + i,
		    bitmap_get(&zones.info[z2].bitmap, i));
		zones.info[z1].frames[base_diff + i] =
		    zones.info[z2].frames[i];
	}
}
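As with Example 1, the caller must hand in confdata large enough for the merged zone. Under the same layout assumption (frame_t array followed by the bitmap storage), the required size would be:

/* Sketch: confdata size for the merged zone (layout as in Example 1). */
size_t count = zones.info[z2].base - zones.info[z1].base
    + zones.info[z2].count;
size_t confsize = sizeof(frame_t) * count + bitmap_size(count);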
Example 8
/* Spill pseudos which are assigned to hard registers in SET.  Add
   affected insns for processing in the subsequent constraint
   pass.  */
static void
spill_pseudos (HARD_REG_SET set)
{
  int i;
  bitmap_head to_process;
  rtx_insn *insn;

  if (hard_reg_set_empty_p (set))
    return;
  if (lra_dump_file != NULL)
    {
      fprintf (lra_dump_file, "	   Spilling non-eliminable hard regs:");
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	if (TEST_HARD_REG_BIT (set, i))
	  fprintf (lra_dump_file, " %d", i);
      fprintf (lra_dump_file, "\n");
    }
  bitmap_initialize (&to_process, &reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    if (lra_reg_info[i].nrefs != 0 && reg_renumber[i] >= 0
	&& overlaps_hard_reg_set_p (set,
				    PSEUDO_REGNO_MODE (i), reg_renumber[i]))
      {
	if (lra_dump_file != NULL)
	  fprintf (lra_dump_file, "	 Spilling r%d(%d)\n",
		   i, reg_renumber[i]);
	reg_renumber[i] = -1;
	bitmap_ior_into (&to_process, &lra_reg_info[i].insn_bitmap);
      }
  IOR_HARD_REG_SET (lra_no_alloc_regs, set);
  for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
    if (bitmap_bit_p (&to_process, INSN_UID (insn)))
      {
	lra_push_insn (insn);
	lra_set_used_insn_alternative (insn, -1);
      }
  bitmap_clear (&to_process);
}
Example 9
static void
collect_pattern_seqs (void)
{
  htab_iterator hti0, hti1, hti2;
  p_hash_bucket hash_bucket;
  p_hash_elem e0, e1;
#if defined STACK_REGS || defined HAVE_cc0
  basic_block bb;
  bitmap_head dont_collect;

  /* Extra initialization step to ensure that no stack registers (if present)
     or cc0 code (if present) are live across abnormal edges.
     Set a flag in DONT_COLLECT for an insn if a stack register is live
     after the insn or the insn is cc0 setter or user.  */
  bitmap_initialize (&dont_collect, NULL);

#ifdef STACK_REGS
  FOR_EACH_BB (bb)
  {
    regset_head live;
    rtx insn;
    rtx prev;

    /* Initialize liveness propagation.  */
    INIT_REG_SET (&live);
    bitmap_copy (&live, DF_LR_OUT (bb));
    df_simulate_initialize_backwards (bb, &live);

    /* Propagate liveness info and mark insns where a stack reg is live.  */
    for (insn = BB_END (bb); ; insn = prev)
      {
	prev = PREV_INSN (insn);
	if (INSN_P (insn))
	  {
	    int reg;
	    for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
	      {
		if (REGNO_REG_SET_P (&live, reg))
		  {
		    bitmap_set_bit (&dont_collect, INSN_UID (insn));
		    break;
		  }
	      }
	  }
	if (insn == BB_HEAD (bb))
	  break;
	df_simulate_one_insn_backwards (bb, insn, &live);
      }

    /* Free unused data.  */
    CLEAR_REG_SET (&live);
  }
#endif

#ifdef HAVE_cc0
  /* Mark CC0 setters and users as ineligible for collection into sequences.
     This is an over-conservative fix, since it is OK to include
     a cc0_setter, but only if we also include the corresponding cc0_user,
     and vice versa.  */
  FOR_EACH_BB (bb)
  {
    rtx insn;
    rtx next_tail;

    next_tail = NEXT_INSN (BB_END (bb));

    for (insn = BB_HEAD (bb); insn != next_tail; insn = NEXT_INSN (insn))
      {
	if (INSN_P (insn) && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
	  bitmap_set_bit (&dont_collect, INSN_UID (insn));
      }
  }
#endif

#endif /* defined STACK_REGS || defined HAVE_cc0 */

  /* Initialize PATTERN_SEQS to empty.  */
  pattern_seqs = 0;

  /* Try to match every abstractable insn with every other insn in the same
     HASH_BUCKET.  */

  FOR_EACH_HTAB_ELEMENT (hash_buckets, hash_bucket, p_hash_bucket, hti0)
    if (htab_elements (hash_bucket->seq_candidates) > 1)
      FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e0, p_hash_elem, hti1)
        FOR_EACH_HTAB_ELEMENT (hash_bucket->seq_candidates, e1, p_hash_elem,
                               hti2)
          if (e0 != e1
#if defined STACK_REGS || defined HAVE_cc0
              && !bitmap_bit_p (&dont_collect, INSN_UID (e0->insn))
              && !bitmap_bit_p (&dont_collect, INSN_UID (e1->insn))
#endif
             )
            match_seqs (e0, e1);
#if defined STACK_REGS || defined HAVE_cc0
  /* Free unused data.  */
  bitmap_clear (&dont_collect);
#endif
}
Example 10
/* The major function for aggressive coalescing of pseudos.  Moves are
   coalesced only if both pseudos were spilled and neither is a special
   reload pseudo.  */
bool
lra_coalesce (void)
{
  basic_block bb;
  rtx mv, set, insn, next, *sorted_moves;
  int i, mv_num, sregno, dregno;
  int coalesced_moves;
  int max_regno = max_reg_num ();
  bitmap_head involved_insns_bitmap;

  timevar_push (TV_LRA_COALESCE);

  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
	     "\n********** Pseudos coalescing #%d: **********\n\n",
	     ++lra_coalesce_iter);
  first_coalesced_pseudo = XNEWVEC (int, max_regno);
  next_coalesced_pseudo = XNEWVEC (int, max_regno);
  for (i = 0; i < max_regno; i++)
    first_coalesced_pseudo[i] = next_coalesced_pseudo[i] = i;
  sorted_moves = XNEWVEC (rtx, get_max_uid ());
  mv_num = 0;
  /* Collect moves.  */
  coalesced_moves = 0;
  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS_SAFE (bb, insn, next)
	if (INSN_P (insn)
	    && (set = single_set (insn)) != NULL_RTX
	    && REG_P (SET_DEST (set)) && REG_P (SET_SRC (set))
	    && (sregno = REGNO (SET_SRC (set))) >= FIRST_PSEUDO_REGISTER
	    && (dregno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER
	    && mem_move_p (sregno, dregno)
	    && coalescable_pseudo_p (sregno) && coalescable_pseudo_p (dregno)
	    && ! side_effects_p (set)
	    && !(lra_intersected_live_ranges_p
		 (lra_reg_info[sregno].live_ranges,
		  lra_reg_info[dregno].live_ranges)))
	  sorted_moves[mv_num++] = insn;
    }
  qsort (sorted_moves, mv_num, sizeof (rtx), move_freq_compare_func);
  /* Coalesced copies, most frequently executed first.	*/
  bitmap_initialize (&coalesced_pseudos_bitmap, &reg_obstack);
  bitmap_initialize (&involved_insns_bitmap, &reg_obstack);
  for (i = 0; i < mv_num; i++)
    {
      mv = sorted_moves[i];
      set = single_set (mv);
      lra_assert (set != NULL && REG_P (SET_SRC (set))
		  && REG_P (SET_DEST (set)));
      sregno = REGNO (SET_SRC (set));
      dregno = REGNO (SET_DEST (set));
      if (first_coalesced_pseudo[sregno] == first_coalesced_pseudo[dregno])
	{
	  coalesced_moves++;
	  if (lra_dump_file != NULL)
	    fprintf
	      (lra_dump_file, "      Coalescing move %i:r%d-r%d (freq=%d)\n",
	       INSN_UID (mv), sregno, dregno,
	       BLOCK_FOR_INSN (mv)->frequency);
	  /* We updated involved_insns_bitmap when doing the merge.  */
	}
      else if (!(lra_intersected_live_ranges_p
		 (lra_reg_info[first_coalesced_pseudo[sregno]].live_ranges,
		  lra_reg_info[first_coalesced_pseudo[dregno]].live_ranges)))
	{
	  coalesced_moves++;
	  if (lra_dump_file != NULL)
	    fprintf
	      (lra_dump_file,
	       "  Coalescing move %i:r%d(%d)-r%d(%d) (freq=%d)\n",
	       INSN_UID (mv), sregno, ORIGINAL_REGNO (SET_SRC (set)),
	       dregno, ORIGINAL_REGNO (SET_DEST (set)),
	       BLOCK_FOR_INSN (mv)->frequency);
	  bitmap_ior_into (&involved_insns_bitmap,
			   &lra_reg_info[sregno].insn_bitmap);
	  bitmap_ior_into (&involved_insns_bitmap,
			   &lra_reg_info[dregno].insn_bitmap);
	  merge_pseudos (sregno, dregno);
	}
    }
  bitmap_initialize (&used_pseudos_bitmap, &reg_obstack);
  FOR_EACH_BB (bb)
    {
      update_live_info (df_get_live_in (bb));
      update_live_info (df_get_live_out (bb));
      FOR_BB_INSNS_SAFE (bb, insn, next)
	if (INSN_P (insn)
	    && bitmap_bit_p (&involved_insns_bitmap, INSN_UID (insn)))
	  {
	    if (! substitute (&insn))
	      continue;
	    lra_update_insn_regno_info (insn);
	    if ((set = single_set (insn)) != NULL_RTX && set_noop_p (set))
	      {
		/* Coalesced move.  */
		if (lra_dump_file != NULL)
		  fprintf (lra_dump_file, "	 Removing move %i (freq=%d)\n",
			 INSN_UID (insn), BLOCK_FOR_INSN (insn)->frequency);
		lra_set_insn_deleted (insn);
	      }
	  }
    }
  bitmap_clear (&used_pseudos_bitmap);
  bitmap_clear (&involved_insns_bitmap);
  bitmap_clear (&coalesced_pseudos_bitmap);
  if (lra_dump_file != NULL && coalesced_moves != 0)
    fprintf (lra_dump_file, "Coalesced Moves = %d\n", coalesced_moves);
  free (sorted_moves);
  free (next_coalesced_pseudo);
  free (first_coalesced_pseudo);
  timevar_pop (TV_LRA_COALESCE);
  return coalesced_moves != 0;
}
Example 11
/** Perform ia32 specific task initialization.
 *
 * @param t Task to be initialized.
 */
void task_create_arch(task_t *t)
{
	t->arch.iomapver = 0;
	bitmap_initialize(&t->arch.iomap, NULL, 0);
}
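The task thus starts with an empty I/O permission bitmap (zero elements, no backing store); ddi_iospace_enable_arch() in Example 2 detects the short map and grows it on first use.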