Example #1
int __fnref_register(const char *libname,
		     int libtag, int cbirev,
		     const char *symname, void (*fn)(void))
{
	unsigned int hash;
	size_t len;
	int pos;

	if ((unsigned int)libtag >= MAX_FNLIBS)
		early_panic("reference table overflow for library %s",
			    libname);

	pos = nrefs[libtag]++;
	if (pos >= MAX_FNREFS)
		early_panic("too many function references in library %s (> %d)",
			    libname, MAX_FNREFS);

	assert(__fnrefs[libtag][pos].fn == NULL);
	__fnrefs[libtag][pos].fn = fn;
	len = strlen(symname);
	hash = __hash_key(symname, len, 0);
	hash = __hash_key(&cbirev, sizeof(cbirev), hash);
	__fnrefs[libtag][pos].hash = hash & 0xfffff;

	return __refmangle(libtag, hash, pos);
}
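
The handle returned above packs three things into one value: the library tag, the 20-bit combined hash of the symbol name and CBI revision, and the slot position. __refmangle()'s actual encoding is not shown here; the sketch below uses a hypothetical refmangle()/refdemangle() pair purely to illustrate the bit-packing idea.

#include <assert.h>
#include <stdint.h>

/* Hypothetical layout: [tag:8][hash:20][pos:4] in a 32-bit handle. */
static inline uint32_t refmangle(uint32_t tag, uint32_t hash, uint32_t pos)
{
	return (tag << 24) | ((hash & 0xfffff) << 4) | (pos & 0xf);
}

static inline void refdemangle(uint32_t handle, uint32_t *tag,
			       uint32_t *hash, uint32_t *pos)
{
	*tag = handle >> 24;
	*hash = (handle >> 4) & 0xfffff;
	*pos = handle & 0xf;
}

int main(void)
{
	uint32_t tag, hash, pos;

	refdemangle(refmangle(3, 0xabcde, 7), &tag, &hash, &pos);
	assert(tag == 3 && hash == 0xabcde && pos == 7);
	return 0;
}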
Example #2
/*
 * Will be called once on behalf of xenomai_init(), and when
 * re-binding after a fork.
 */
static void init_bind(void)
{
	cobalt_umm_private = map_umm(COBALT_MEMDEV_PRIVATE, &private_size);
	if (cobalt_umm_private == MAP_FAILED) {
		early_warning("cannot map private umm area: %s",
			      strerror(errno));
		early_panic("(CONFIG_DEVTMPFS_MOUNT not enabled?)");
	}
}
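
map_umm() follows the mmap(2) convention of returning MAP_FAILED on error, which is why the check above compares against MAP_FAILED rather than NULL. A minimal sketch of such a helper, assuming a device node that exports the pool (the path handling and size reporting below are illustrative, not the actual Xenomai implementation):

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical: map a driver-exported memory pool, report its size. */
static void *map_umm_sketch(const char *devpath, uint32_t *size_r)
{
	void *addr;
	int fd;

	fd = open(devpath, O_RDWR);
	if (fd < 0)
		return MAP_FAILED;

	/* Assume a one-page pool; the real driver would report the
	   actual pool size (e.g. via an ioctl). */
	*size_r = sysconf(_SC_PAGESIZE);
	addr = mmap(NULL, *size_r, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* The mapping outlives the descriptor. */

	return addr;
}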
Example #3
/**
 * get_xen_paddr - get physical address to relocate Xen to
 *
 * Xen is relocated to as near to the top of RAM as possible and
 * aligned to a XEN_PADDR_ALIGN boundary.
 */
static paddr_t __init get_xen_paddr(void)
{
    struct dt_mem_info *mi = &early_info.mem;
    paddr_t min_size;
    paddr_t paddr = 0, last_end;
    int i;

    min_size = (_end - _start + (XEN_PADDR_ALIGN-1)) & ~(XEN_PADDR_ALIGN-1);

    last_end = mi->bank[0].start;

    /* Find the highest bank with enough space. */
    for ( i = 0; i < mi->nr_banks; i++ )
    {
        const struct membank *bank = &mi->bank[i];
        paddr_t s, e;

        /* We can only deal with contiguous memory at the moment */
        if ( last_end != bank->start )
            break;

        last_end = bank->start + bank->size;

        if ( bank->size >= min_size )
        {
            e = consider_modules(bank->start, bank->start + bank->size,
                                 min_size, XEN_PADDR_ALIGN, 1);
            if ( !e )
                continue;

#ifdef CONFIG_ARM_32
            /* Xen must be under 4GB */
            if ( e > 0x100000000ULL )
                e = 0x100000000ULL;
            if ( e < bank->start )
                continue;
#endif

            s = e - min_size;

            if ( s > paddr )
                paddr = s;
        }
    }

    if ( !paddr )
        early_panic("Not enough memory to relocate Xen");

    early_printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
                 paddr, paddr + min_size);

    early_info.modules.module[MOD_XEN].start = paddr;
    early_info.modules.module[MOD_XEN].size = min_size;

    return paddr;
}
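
The min_size computation rounds the Xen image size up to the next XEN_PADDR_ALIGN boundary using the standard power-of-two idiom (x + (a-1)) & ~(a-1). A standalone illustration of that rounding, with arbitrary values:

#include <assert.h>
#include <stdint.h>

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
    /* Round an image size up to a 2 MiB boundary (power of two). */
    assert(ALIGN_UP(0x253000, 2 << 20) == 0x400000);
    /* Already-aligned values are unchanged. */
    assert(ALIGN_UP(0x400000, 2 << 20) == 0x400000);
    return 0;
}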
Example #4
static void trank_init_context(void)
{
	struct trank_context *tc;

	tc = malloc(sizeof(*tc));
	if (tc == NULL)
		early_panic("error creating TSD: %s", strerror(ENOMEM));
		
	memset(tc, 0, sizeof(*tc));
	pthread_setspecific(trank_context_key, tc);
}
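
The context stored with pthread_setspecific() is fetched back per thread with pthread_getspecific(), and the destructor registered with the key reclaims it at thread exit. A minimal sketch of the lookup side, with a hypothetical trank_get_context() accessor:

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t trank_context_key;

struct trank_context {
	int placeholder;	/* Stand-in for the real members. */
};

/* Hypothetical accessor: return this thread's context, creating
   it lazily on first use. */
static struct trank_context *trank_get_context(void)
{
	struct trank_context *tc = pthread_getspecific(trank_context_key);

	if (tc == NULL) {
		tc = calloc(1, sizeof(*tc));
		if (tc)
			pthread_setspecific(trank_context_key, tc);
	}

	return tc;
}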
Example #5
/* Will be called only once, upon call to xenomai_init(). */
static void init_loadup(__u32 vdso_offset)
{
	uint32_t size;

	cobalt_umm_shared = map_umm(COBALT_MEMDEV_SHARED, &size);
	if (cobalt_umm_shared == MAP_FAILED)
		early_panic("cannot map shared umm area: %s",
			    strerror(errno));

	cobalt_vdso = (struct xnvdso *)(cobalt_umm_shared + vdso_offset);
}
Example #6
int trank_init_interface(void)
{
#ifndef HAVE_TLS
	int ret;

	ret = pthread_key_create(&trank_context_key, __trank_destroy_context);
	if (ret)
		early_panic("error creating TSD key: %s", strerror(ret));
#endif
	sigaddset(&trank_sigperiod_set, SIGPERIOD);
	cobalt_register_tsd_hook(&tsd_hook);

	return 0;
}
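
Note that sigaddset() above is applied to a static set, relying on zero initialization; the portable POSIX pattern builds the set explicitly with sigemptyset() first, as in this sketch (SIGPERIOD stands in for whatever signal the interface actually uses):

#include <pthread.h>
#include <signal.h>

#ifndef SIGPERIOD
#define SIGPERIOD SIGRTMIN	/* Assumed stand-in for the real signal. */
#endif

/* Block the period signal in the calling thread. */
static void block_period_signal(void)
{
	sigset_t set;

	sigemptyset(&set);	/* POSIX requires initializing the set. */
	sigaddset(&set, SIGPERIOD);
	pthread_sigmask(SIG_BLOCK, &set, NULL);
}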
Example #7
File: timer.c Project: mrd/terrier
void timer_init(void)
{
  int i;
#ifdef USE_VMM
  if(vmm_map_region(&l4wakeup_region) != OK) {
    early_panic("Unable to map L4 wakeup registers.");
    return;
  }
  if(vmm_map_region(&l4perip_region) != OK) {
    early_panic("Unable to map L4 peripheral registers.");
    return;
  }
#ifdef OMAP3530
  if(vmm_map_region(&l4core_region) != OK) {
    early_panic("Unable to map L4 core registers.");
    return;
  }
#endif
#ifdef OMAP4460
  if(vmm_map_region(&l4abe_region) != OK) {
    early_panic("Unable to map L4 ABE registers.");
    return;
  }
#endif
#endif

#ifdef OMAP3530
  /* For some reason, this is defaulting to SYS_CLK on my board, even
   * though the manual says otherwise. Set it to 32K_FCLK, div-by-1. */
  *CM_CLKSEL_WKUP = (*CM_CLKSEL_WKUP & (~0x7)) | 0x2;
#endif
#ifdef OMAP4460
  DLOG(1, "CM_WKUP_GPTIMER1_CLKCTRL=%#x (%s)\n", *CM_WKUP_GPTIMER1_CLKCTRL, GETBITS(*CM_WKUP_GPTIMER1_CLKCTRL, 24, 1) ? "32kHz" : "SYS_CLK");
#endif

#ifdef OMAP4460
  GBLTIMER = (void *) (arm_config_base_address() + 0x200);
  PVTTIMER = (void *) (arm_config_base_address() + 0x600);
  WATCHDOG = (void *) (arm_config_base_address() + 0x620);
#ifdef USE_VMM
  timer_region.pstart = ((u32) PVTTIMER) & 0xFFF00000;
  timer_region.vstart = (void *) (((u32) PVTTIMER) & 0xFFF00000);
  /* ensure mapping of private/global timers */
  if(vmm_map_region(&timer_region) != OK) {
    early_panic("Unable to map private/global timers.");
    return;
  }
#endif
  PVTTIMER->control = 0;
  WATCHDOG->control = 0;
  GBLTIMER->control = 0;
#endif

  for(i=0;i<NUM_TIMERS;i++) {
    intc_set_irq_handler(GPTIMER_BASE_IRQ + i, timer_irq_handler);
    intc_unmask_irq(GPTIMER_BASE_IRQ + i);
    DLOG(1, "gptimer[%d] revision=%#x TCLR=%#x\n", i+1, gptimer[i+1]->TIDR, gptimer[i+1]->TCLR);
  }

  /* OMAP4460 TRM p892 */
  DLOG(1, "*CM_CLKMODE_DPLL_PER=%#x\n", *((u32 *) 0x4A008140));
  /* OMAP4460 TRM p895 */
  DLOG(1, "*CM_CLKSEL_DPLL_PER=%#x\n", *((u32 *) 0x4A00814C));

  timing_loop();
}
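
The CM_CLKSEL_WKUP write above is the usual read-modify-write on a memory-mapped register: mask out the field, then OR in the new value. A generic form of the idiom (the register address and field layout below are illustrative, not the OMAP ones):

#include <stdint.h>

/* Update a field in a memory-mapped register: clear the bits in
 * `mask`, then set `value` (which must fit inside `mask`). */
static inline void mmio_update(volatile uint32_t *reg,
                               uint32_t mask, uint32_t value)
{
  *reg = (*reg & ~mask) | (value & mask);
}

/* Usage mirroring the clock-mux write above:
 *   mmio_update(CM_CLKSEL_WKUP, 0x7, 0x2);  select 32K_FCLK, div-by-1 */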
Example #8
static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
{
    paddr_t ram_start, ram_end, ram_size;
    paddr_t contig_start, contig_end;
    paddr_t s, e;
    unsigned long ram_pages;
    unsigned long heap_pages, xenheap_pages, domheap_pages;
    unsigned long dtb_pages;
    unsigned long boot_mfn_start, boot_mfn_end;
    int i;
    void *fdt;

    if ( !early_info.mem.nr_banks )
        early_panic("No memory bank");

    /*
     * We are going to accumulate two regions here.
     *
     * The first is the bounds of the initial memory region which is
     * contiguous with the first bank. For simplicity the xenheap is
     * always allocated from this region.
     *
     * The second is the complete bounds of the regions containing RAM
     * (ie. from the lowest RAM address to the highest), which
     * includes any holes.
     *
     * We also track the number of actual RAM pages (i.e. not counting
     * the holes).
     */
    ram_size  = early_info.mem.bank[0].size;

    contig_start = ram_start = early_info.mem.bank[0].start;
    contig_end   = ram_end = ram_start + ram_size;

    for ( i = 1; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_size = early_info.mem.bank[i].size;
        paddr_t bank_end = bank_start + bank_size;

        paddr_t new_ram_size = ram_size + bank_size;
        paddr_t new_ram_start = min(ram_start,bank_start);
        paddr_t new_ram_end = max(ram_end,bank_end);

        /*
         * If the new bank is contiguous with the initial contiguous
         * region then incorporate it into the contiguous region.
         *
         * Otherwise we allow non-contiguous regions so long as at
         * least half of the total RAM region actually contains
         * RAM. We actually fudge this slightly and require that
         * adding the current bank does not cause us to violate this
         * restriction.
         *
         * This restriction ensures that the frametable (which is not
         * currently sparse) does not consume all available RAM.
         */
        if ( bank_start == contig_end )
            contig_end = bank_end;
        else if ( bank_end == contig_start )
            contig_start = bank_start;
        else if ( 2 * new_ram_size < new_ram_end - new_ram_start )
            /* Would create memory map which is too sparse, so stop here. */
            break;

        ram_size = new_ram_size;
        ram_start = new_ram_start;
        ram_end = new_ram_end;
    }

    if ( i != early_info.mem.nr_banks )
    {
        early_printk("WARNING: only using %d out of %d memory banks\n",
                     i, early_info.mem.nr_banks);
        early_info.mem.nr_banks = i;
    }

    total_pages = ram_pages = ram_size >> PAGE_SHIFT;

    /*
     * Locate the xenheap using these constraints:
     *
     *  - must be 32 MiB aligned
     *  - must not include Xen itself or the boot modules
     *  - must be at most 1/8 the total RAM in the system
     *  - must be at least 128M
     *
     * We try to allocate the largest xenheap possible within these
     * constraints.
     */
    heap_pages = ram_pages;
    xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
    xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));

    do
    {
        /* xenheap is always in the initial contiguous region */
        e = consider_modules(contig_start, contig_end,
                             pfn_to_paddr(xenheap_pages),
                             32<<20, 0);
        if ( e )
            break;

        xenheap_pages >>= 1;
    } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );

    if ( !e )
        early_panic("Not enough space for xenheap");

    domheap_pages = heap_pages - xenheap_pages;

    early_printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
                 e - (pfn_to_paddr(xenheap_pages)), e,
                 xenheap_pages);
    early_printk("Dom heap: %lu pages\n", domheap_pages);

    setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);

    /*
     * Need a single mapped page for populating bootmem_region_list
     * and enough mapped pages for copying the DTB.
     */
    dtb_pages = (dtb_size + PAGE_SIZE-1) >> PAGE_SHIFT;
    boot_mfn_start = xenheap_mfn_end - dtb_pages - 1;
    boot_mfn_end = xenheap_mfn_end;

    init_boot_pages(pfn_to_paddr(boot_mfn_start), pfn_to_paddr(boot_mfn_end));

    /* Copy the DTB. */
    fdt = mfn_to_virt(alloc_boot_pages(dtb_pages, 1));
    copy_from_paddr(fdt, dtb_paddr, dtb_size, BUFFERABLE);
    device_tree_flattened = fdt;

    /* Add non-xenheap memory */
    for ( i = 0; i < early_info.mem.nr_banks; i++ )
    {
        paddr_t bank_start = early_info.mem.bank[i].start;
        paddr_t bank_end = bank_start + early_info.mem.bank[i].size;

        s = bank_start;
        while ( s < bank_end )
        {
            paddr_t n = bank_end;

            e = next_module(s, &n);

            if ( e == ~(paddr_t)0 )
            {
                e = n = ram_end;
            }

            /*
             * Module in a RAM bank other than the one we are
             * dealing with here.
             */
            if ( e > bank_end )
                e = bank_end;

            /* Avoid the xenheap */
            if ( s < pfn_to_paddr(xenheap_mfn_start+xenheap_pages)
                 && pfn_to_paddr(xenheap_mfn_start) < e )
            {
                e = pfn_to_paddr(xenheap_mfn_start);
                n = pfn_to_paddr(xenheap_mfn_start+xenheap_pages);
            }

            dt_unreserved_regions(s, e, init_boot_pages, 0);

            s = n;
        }
    }

    /* Frame table covers all of RAM region, including holes */
    setup_frametable_mappings(ram_start, ram_end);
    max_page = PFN_DOWN(ram_end);

    /* Add xenheap memory that was not already added to the boot
       allocator. */
    init_xenheap_pages(pfn_to_paddr(xenheap_mfn_start),
                       pfn_to_paddr(boot_mfn_start));

    end_boot_allocator();
}