Example #1
/**
 * sn_init_pdas - setup node data areas
 *
 * One time setup for Node Data Area.  Called by sn_setup().
 */
static void __init sn_init_pdas(char **cmdline_p)
{
	cnodeid_t cnode;

	memset(pda->cnodeid_to_nasid_table, -1,
	       sizeof(pda->cnodeid_to_nasid_table));
	for_each_online_node(cnode)
		pda->cnodeid_to_nasid_table[cnode] =
		    pxm_to_nasid(nid_to_pxm_map[cnode]);

	numionodes = num_online_nodes();
	scan_for_ionodes();

	/*
	 * Allocate & initialize the nodepda for each node.
	 */
	for_each_online_node(cnode) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
		memset(nodepdaindr[cnode]->phys_cpuid, -1, 
		    sizeof(nodepdaindr[cnode]->phys_cpuid));
	}

	/*
	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
	 */
	for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
	}

	/*
	 * Now copy the array of nodepda pointers to each nodepda.
	 */
	for (cnode = 0; cnode < numionodes; cnode++)
		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
		       sizeof(nodepdaindr));

	/*
	 * Set up IO related platform-dependent nodepda fields.
	 * The following routine actually sets up the hubinfo struct
	 * in nodepda.
	 */
	for_each_online_node(cnode) {
		bte_init_node(nodepdaindr[cnode], cnode);
	}

	/*
	 * Initialize the per node hubdev.  This includes IO Nodes and 
	 * headless/memless nodes.
	 */
	for (cnode = 0; cnode < numionodes; cnode++) {
		hubdev_init_node(nodepdaindr[cnode], cnode);
	}
}
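Note: every example in this collection calls the same historical API,
alloc_bootmem_node(NODE_DATA(nid), size), the pre-memblock boot-time
allocator that hands out node-local memory before the slab allocator is
up. A minimal sketch of the idiom follows; the struct foo payload and
example_init() are hypothetical placeholders, not code from any example.

#include <linux/bootmem.h>	/* alloc_bootmem_node(), historical API */
#include <linux/mmzone.h>	/* NODE_DATA() */
#include <linux/string.h>	/* memset() */

struct foo {			/* hypothetical payload */
	int table[8];
};

static void __init example_init(int nid)
{
	struct foo *p;

	/* Node-local early allocation; callers in these examples either
	   check the result themselves or rely on the allocator's own
	   failure handling, which varied across kernel versions. */
	p = alloc_bootmem_node(NODE_DATA(nid), sizeof(*p));
	if (!p)
		return;

	/* Non-zero fill patterns (like the -1 fills above) must be
	   applied by hand. */
	memset(p, -1, sizeof(p->table));
}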
Example #2
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{ 
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(cpu_to_node(i))) {
			printk("cpu with no node %d, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem(size);
		} else { 
			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
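		/* Record where this CPU's copy sits relative to the original
		   per-CPU section; per-CPU references resolve by adding this
		   offset to the variable's link-time address. */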
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}
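For reference, here is a hedged sketch of how that offset is consumed:
an approximation of the per-CPU accessor, not the exact macro from any
one tree.

/* A per-CPU variable's address for CPU 'cpu' is its link-time address
 * plus that CPU's relocation offset recorded above. */
#define my_per_cpu_ptr(var, cpu) \
	((typeof(var) *)((char *)&(var) + cpu_pda(cpu)->data_offset))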
Example #3
/*
 * Set up the platform-dependent fields in the nodepda.
 */
void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
{
	hubinfo_t hubinfo;

	extern void router_map_init(nodepda_t *);
	extern void router_queue_init(nodepda_t *, cnodeid_t);
	extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);

	/* Allocate per-node platform-dependent data */
	hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));

	npda->pdinfo = (void *)hubinfo;
	hubinfo->h_nodepda = npda;
	hubinfo->h_cnodeid = node;

	spin_lock_init(&hubinfo->h_crblock);

	npda->xbow_peer = INVALID_NASID;

	/*
	 * Initialize the linked list of
	 * router info pointers to the dependent routers
	 */
	npda->npda_rip_first = NULL;

	/*
	 * npda_rip_last always points to the place
	 * where the next element is to be inserted
	 * into the list
	 */
	npda->npda_rip_last = &npda->npda_rip_first;
	npda->geoid.any.type = GEO_TYPE_INVALID;

	init_MUTEX_LOCKED(&npda->xbow_sema); /* init it locked? */
}
Example #4
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

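	/* Prefer a node-local arena; if this node has no node data or
	   the allocation fails, fall back to a system-wide allocation. */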
	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
Example #5
void __init init_nonpaged_pool(void *pgdat, int size, struct _npool_data *pool,
			       void **start, void **free)
{
	struct mymap *mytmp;
	struct _lsthds *l = pool->npool$ar_lsthds;

	/* Carve the pool out of node-local boot memory and align the
	   base to the map granularity. */
	*start = alloc_bootmem_node(pgdat, size);
	*start = (struct page *)(PAGE_OFFSET +
				 MAP_ALIGN((unsigned long)(*start) - PAGE_OFFSET));
	*free = *start;
	l->lsthds$l_variablelist_unused = *free;

	/* The pool begins life as a single free packet spanning 'size'
	   bytes; a zero flink terminates the list. */
	mytmp = *start;
	mytmp->flink = 0;
	mytmp->size = size;
	poison_packet(mytmp, size, 1);
}
Example #6
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
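The slab_is_available() guard above is the usual way to write allocation
paths that may run either before or after the slab allocator comes up;
the same guard reappears in Examples #9 and #10. A minimal sketch of the
idiom (early_alloc_node() is a hypothetical helper, not a kernel API):

static void *early_alloc_node(size_t size, int nid)
{
	void *p;

	if (slab_is_available()) {
		/* Normal path once the slab allocator is up. */
		p = kzalloc_node(size, GFP_KERNEL, nid);
	} else {
		/* Early-boot path: node-local bootmem, zeroed by the
		   caller rather than relying on allocator behaviour. */
		p = alloc_bootmem_node(NODE_DATA(nid), size);
		if (p)
			memset(p, 0, size);
	}
	return p;
}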
Example #7
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
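	/* Zeroing section_mem_map leaves the section marked as absent,
	   so later passes skip it. */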
	mem_section[pnum].section_mem_map = 0;
	return NULL;
}
Example #8
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}
Example #9
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
				nr * sizeof(*desc->kstat_irqs));

	/*
	 * Don't overwrite the old pointer if the allocation failed;
	 * init_copy_kstat_irqs() may still be using the old array.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
Example #10
struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "cannot allocate irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
Example #11
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
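	/* Pad to PERCPU_ENOUGH_ROOM so per-CPU data added by modules
	   fits in the same per-CPU copy. */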
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif

	for_each_possible_cpu(i) {
		ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);

		paca[i].data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}
Example #12
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr, 
	unsigned long *zholes_size)
{
	int i, size = 0;
	struct page *discard;

	if (mem_map == (mem_map_t *)NULL)
		mem_map = (mem_map_t *)PAGE_OFFSET;

	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
					zholes_size, pmap);
	pgdat->node_id = nid;

	/*
	 * Get space for the valid bitmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++)
		size += zones_size[i];
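	/* One valid bit per page: convert the page count to bytes,
	   rounding up, then align to a long for the allocation below. */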
	size = LONG_ALIGN((size + 7) >> 3);
	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
	memset(pgdat->valid_addr_bitmap, 0, size);
}
Example #13
/*
 * Set up the platform-dependent fields in the nodepda.
 */
void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
{
	hubinfo_t hubinfo;
#ifdef CONFIG_IA64_SGI_SN1
	int	  sn;
#endif

	extern void router_map_init(nodepda_t *);
	extern void router_queue_init(nodepda_t *, cnodeid_t);
	extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);

	/* Allocate per-node platform-dependent data */
	hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));

	npda->pdinfo = (void *)hubinfo;
	hubinfo->h_nodepda = npda;
	hubinfo->h_cnodeid = node;
	hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);

	spin_lock_init(&hubinfo->h_crblock);

	hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
	npda->xbow_peer = INVALID_NASID;

	/* 
	 * Initialize the linked list of
	 * router info pointers to the dependent routers
	 */
	npda->npda_rip_first = NULL;

	/*
	 * npda_rip_last always points to the place
	 * where the next element is to be inserted
	 * into the list 
	 */
	npda->npda_rip_last = &npda->npda_rip_first;
	npda->module_id = INVALID_MODULE;

#ifdef CONFIG_IA64_SGI_SN1
	/*
	 * Initialize the interrupts.
	 * On sn2, this is done at pci init time,
	 * because sn2 needs the cpus checked in
	 * when it initializes interrupts.  This is
	 * so we don't see all the nodes as headless.
	 */
	for (sn=0; sn<NUM_SUBNODES; sn++) {
		intr_init_vecblk(npda, node, sn);
	}
#endif /* CONFIG_IA64_SGI_SN1 */

	mutex_init_locked(&npda->xbow_sema); /* init it locked? */

#ifdef	LATER

	/* Setup the (module,slot) --> nic mapping for all the routers
	 * in the system. This is useful during error handling when
	 * there is no shared memory.
	 */
	router_map_init(npda);

	/* Allocate memory for the per-node router traversal queue */
	router_queue_init(npda,node);
	npda->sbe_info = alloc_bootmem_node(NODE_DATA(node), sizeof (sbe_info_t));
	ASSERT(npda->sbe_info);

#endif /* LATER */
}