Example #1
static int spu_map_resource(struct spu *spu, int nr,
			    void __iomem** virt, unsigned long *phys)
{
	struct device_node *np = spu->devnode;
	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	struct resource resource = { };
	unsigned long len;
	int ret;

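	/* translate the nr'th address ("reg") entry of the SPE device
	 * node into a physical address range */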
	ret = of_address_to_resource(np, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	len = resource.end - resource.start + 1;
	*virt = ioremap(resource.start, len);
	if (!*virt) {
		ret = -EINVAL;
		goto out;
	}

	start_pfn = resource.start >> PAGE_SHIFT;
	nr_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->node);
	zone = pgdata->node_zones;

	ret = __add_pages(zone, start_pfn, nr_pages);

out:
	return ret;
}
Example #2
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

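	/* fetch the packed { address, len } property from the SPE node */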
	p = get_property(spe, prop, &proplen);
	if (!p)
		return -EINVAL;
	WARN_ON(proplen != sizeof(*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->node);
	zone = pgdata->node_zones;

	ret = __add_pages(zone, start_pfn, nr_pages);

	return ret;
}
Example #3
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	pgdata = NODE_DATA(nid);

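	/* the hot-added range must be in the kernel's linear mapping
	 * before its pages are handed to the hotplug core */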
	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
Example #4
File: init.c Project: AllenWeb/linux
int arch_add_memory(u64 start, u64 size)
{
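	/* non-NUMA: everything hangs off the single contiguous node,
	 * and new pages always go into its highest zone */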
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES - 1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
Example #5
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
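	/* the identity mapping for the new range has to exist before
	 * the pages are added; it is torn down again on failure below */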
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
Example #6
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
Example #7
File: mem.c Project: 1x23/unifi-gpl
/*
 * This works only for the non-NUMA case.  Later, we'll need a lookup
 * to convert from real physical addresses to nid, that doesn't use
 * pfn_to_nid().
 */
int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(0);
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	start += KERNELBASE;
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
Example #8
File: gnttab.c Project: zhoupeng/spice4xen
static void setup_grant_area(void)
{
    ulong pgs, i;
    int err;
    struct zone *zone;
    struct pglist_data *pgdata;
    int nid;

    pgs = find_grant_maps();
    setup_foreign_segment();

    printk("%s: Xen VIO will use a foreign address space of 0x%lx pages\n",
           __func__, pgs);

    /* add pages to the zone */
    nid = 0;
    pgdata = NODE_DATA(nid);
    zone = pgdata->node_zones;

    err = __add_pages(zone, foreign_map_pfn, pgs);

    if (err < 0) {
        printk(KERN_EMERG "%s: add_pages(0x%lx, 0x%lx) = %d\n",
               __func__, foreign_map_pfn, pgs, err);
        BUG();
    }

    /* create a bitmap to manage these pages */
    foreign_map_bitmap = kmalloc(BITS_TO_LONGS(pgs) * sizeof(long),
                                 GFP_KERNEL);
    if (foreign_map_bitmap == NULL) {
        printk(KERN_EMERG
               "%s: could not allocate foreign_map_bitmap to "
               "manage 0x%lx foreign pages\n", __func__, pgs);
        BUG();
    }
    /* clear all the real bits first */
    bitmap_zero(foreign_map_bitmap, pgs);
    /* I'm paranoid, so mark the unused tail bits of the last word as
     * taken so we never give them away; bitmap_zero() clears whole
     * words, which is why it must run before this and not after */
    for (i = pgs; i < BITS_TO_LONGS(pgs) * BITS_PER_LONG; i++)
        set_bit(i, foreign_map_bitmap);

    foreign_map_pgs = pgs;
}
Example #9
File: mem.c Project: 03199618/linux
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
Example #10
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size - 1);

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		printk(KERN_ERR "%s: Problem encountered in __add_pages!\n",
		       __func__);

	return ret;
}
Example #11
File: mem.c Project: AlexShiLucky/linux
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
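	/* flush and invalidate the dcache over the freshly mapped range */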
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
Example #12
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
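	/* pieces of the range that fall within an existing zone are
	 * added there; whatever is left over goes to ZONE_MOVABLE */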
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
Example #13
File: init.c Project: 1059232202/linux
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	unsigned long nr_pages;
	int rc, zone_enum;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

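	/* walk the range low to high, steering each chunk into the zone
	 * it falls in: DMA below MAX_DMA_ADDRESS, NORMAL up to the end
	 * of DRAM, and everything above that into MOVABLE */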
	while (size_pages > 0) {
		if (start_pfn < dma_end_pfn) {
			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
				   dma_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_DMA;
		} else if (start_pfn < normal_end_pfn) {
			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
				   normal_end_pfn - start_pfn : size_pages;
			zone_enum = ZONE_NORMAL;
		} else {
			nr_pages = size_pages;
			zone_enum = ZONE_MOVABLE;
		}
		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
				 start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}