int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
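The address arithmetic at the top of this function is the standard kernel idiom: shifting a physical address right by PAGE_SHIFT yields a page frame number (pfn), which indexes the memmap. A minimal sketch, assuming 4 KiB pages; the address value is illustrative, not taken from the snippet:

/* Illustrative only: assumes PAGE_SHIFT == 12 (4 KiB pages) */
u64 phys = 0x40000000ULL;                      /* physical address      */
unsigned long pfn = phys >> PAGE_SHIFT;        /* 0x40000 with 4K pages */
struct page *page = pfn_to_page(pfn);          /* memmap entry for pfn  */
struct zone *zone = page_zone(page);           /* zone owning the page  */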
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	memblock_remove(base, memblock_size);

	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	vm_unmap_aliases();

	return ret;
}
Example #3
int __meminit arch_remove_memory(int nid, u64 start, u64 size,
					struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
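vmem_altmap_offset() is not shown in these snippets. In kernels of this era it simply reported how many PFNs at the base of the range are consumed by the altmap itself (and therefore have no usable struct page), roughly:

/* Sketch based on the era's kernel/memremap.c, not verbatim */
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}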
Example #4
/**
 * unpack_table - unpack a dfa table (one of accept, default, base, next check)
 * @blob: data to unpack
 * @bsize: size of blob
 *
 * Returns: pointer to table else NULL on failure
 *
 * NOTE: must be freed by free_table (not kfree)
 */
static struct table_header *unpack_table(char *blob, size_t bsize)
{
	struct table_header *table = NULL;
	struct table_header th;
	int unmap_alias = 0;
	size_t tsize;

	if (bsize < sizeof(struct table_header))
		goto out;

	/* loaded td_id's start at 1, subtract 1 now to avoid doing
	 * it every time we use td_id as an index
	 */
	th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
	th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
	th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
	blob += sizeof(struct table_header);

	if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
	      th.td_flags == YYTD_DATA8))
		goto out;

	tsize = table_size(th.td_lolen, th.td_flags);
	if (bsize < tsize)
		goto out;

	/* freed by free_table */
	table = kmalloc(tsize, GFP_KERNEL | __GFP_NOWARN);
	if (!table) {
		/* Fall back to vmalloc; pad the allocation to at least
		 * sizeof(struct work_struct) so free_table() can overlay
		 * a work_struct on it and defer the vfree
		 */
		tsize = tsize < sizeof(struct work_struct) ?
			sizeof(struct work_struct) : tsize;
		unmap_alias = 1;	/* flush lazy vmap aliases on exit */
		table = vmalloc(tsize);
	}
	if (table) {
		*table = th;
		if (th.td_flags == YYTD_DATA8)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u8, byte_to_byte);
		else if (th.td_flags == YYTD_DATA16)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u16, be16_to_cpu);
		else if (th.td_flags == YYTD_DATA32)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u32, be32_to_cpu);
		else
			goto fail;
	}

out:
	if (unmap_alias)
		vm_unmap_aliases();
	return table;
fail:
	free_table(table);
	return NULL;
}
Example #5
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
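For context, the arm64 set_memory_*() helpers are thin wrappers that feed complementary set/clear masks into change_memory_common(); they look roughly like this:

/* Sketch of the arm64 wrappers (see arch/arm64/mm/pageattr.c) */
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}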
Example #6
/**
 * unpack_table - unpack a dfa table (one of accept, default, base, next check)
 * @blob: data to unpack (NOT NULL)
 * @bsize: size of blob
 *
 * Returns: pointer to table else NULL on failure
 *
 * NOTE: must be freed by kvfree (not kfree)
 */
static struct table_header *unpack_table(char *blob, size_t bsize)
{
	struct table_header *table = NULL;
	struct table_header th;
	size_t tsize;

	if (bsize < sizeof(struct table_header))
		goto out;

	/* loaded td_id's start at 1, subtract 1 now to avoid doing
	 * it every time we use td_id as an index
	 */
	th.td_id = be16_to_cpu(*(__be16 *) (blob)) - 1;
	if (th.td_id > YYTD_ID_MAX)
		goto out;
	th.td_flags = be16_to_cpu(*(__be16 *) (blob + 2));
	th.td_lolen = be32_to_cpu(*(__be32 *) (blob + 8));
	blob += sizeof(struct table_header);

	if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
	      th.td_flags == YYTD_DATA8))
		goto out;

	tsize = table_size(th.td_lolen, th.td_flags);
	if (bsize < tsize)
		goto out;

	table = kvzalloc(tsize, GFP_KERNEL);
	if (table) {
		table->td_id = th.td_id;
		table->td_flags = th.td_flags;
		table->td_lolen = th.td_lolen;
		if (th.td_flags == YYTD_DATA8)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u8, u8, byte_to_byte);
		else if (th.td_flags == YYTD_DATA16)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u16, __be16, be16_to_cpu);
		else if (th.td_flags == YYTD_DATA32)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u32, __be32, be32_to_cpu);
		else
			goto fail;
		/* if table was vmalloced make sure the page tables are synced
		 * before it is used, as it goes live to all cpus.
		 */
		if (is_vmalloc_addr(table))
			vm_unmap_aliases();
	}

out:
	return table;
fail:
	kvfree(table);
	return NULL;
}
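UNPACK_ARRAY is not defined in any of these excerpts. Consistent with the call sites above (destination buffer, blob, length, target type, wire type, byte-swap converter), a minimal sketch of the macro would be:

/* Sketch consistent with the call sites; the real macro lives in
 * security/apparmor/match.h (older versions take one type argument)
 */
#define UNPACK_ARRAY(TABLE, BLOB, LEN, TTYPE, BTYPE, NTOHX)	\
	do {							\
		typeof(LEN) __i;				\
		TTYPE *__t = (TTYPE *) TABLE;			\
		BTYPE *__b = (BTYPE *) BLOB;			\
		for (__i = 0; __i < LEN; __i++)			\
			__t[__i] = NTOHX(__b[__i]);		\
	} while (0)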
Example #7
static int pseries_remove_memory(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
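remove_section_mapping() takes kernel virtual addresses, which is why the physical start is passed through __va() first. As a mental model, __va() maps a physical address into the kernel's linear map; the helper and offset below are hypothetical, since the real definition is per-architecture:

/* Hypothetical model of a linear-map __va(); real kernels derive the
 * offset per architecture and configuration
 */
#define EXAMPLE_PAGE_OFFSET 0xc000000000000000UL	/* illustrative value */
static inline void *example_va(unsigned long phys)
{
	return (void *)(phys + EXAMPLE_PAGE_OFFSET);
}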
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Remove section mappings and sysfs entries for the
	 * section of the memory we are removing.
	 *
	 * NOTE: Ideally, this should be done in generic code like
	 * remove_memory(). But remove_memory() gets called by writing
	 * to sysfs "state" file and we can't remove sysfs entries
	 * while writing to it. So we have to defer it to here.
	 */
	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/*
	 * Update memory regions for memory remove
	 */
	memblock_remove(base, memblock_size);

	/*
	 * Remove htab bolted mappings for this section of memory
	 */
	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
static struct table_header *unpack_table(char *blob, size_t bsize)
{
	struct table_header *table = NULL;
	struct table_header th;
	size_t tsize;

	if (bsize < sizeof(struct table_header))
		goto out;

	th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
	th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
	th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
	blob += sizeof(struct table_header);

	if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
	      th.td_flags == YYTD_DATA8))
		goto out;

	tsize = table_size(th.td_lolen, th.td_flags);
	if (bsize < tsize)
		goto out;

	/* AppArmor's own single-argument kvmalloc() wrapper (it predates
	 * the generic two-argument kvmalloc()): kmalloc with a vmalloc
	 * fallback, freed below with kvfree()
	 */
	table = kvmalloc(tsize);
	if (table) {
		*table = th;
		if (th.td_flags == YYTD_DATA8)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u8, byte_to_byte);
		else if (th.td_flags == YYTD_DATA16)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u16, be16_to_cpu);
		else if (th.td_flags == YYTD_DATA32)
			UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
				     u32, be32_to_cpu);
		else
			goto fail;
	}

out:
	if (is_vmalloc_addr(table))
		vm_unmap_aliases();
	return table;
fail:
	kvfree(table);
	return NULL;
}
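The is_vmalloc_addr() test before vm_unmap_aliases() works because kvmalloc-style allocators fall back to vmalloc() for large or fragmented requests, and kvfree() frees either backing store. A small sketch of that pairing, using the generic two-argument kvmalloc() (not AppArmor's wrapper) and a hypothetical helper name:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *alloc_dfa_buf(size_t len)	/* hypothetical helper */
{
	void *buf = kvmalloc(len, GFP_KERNEL);	/* kmalloc, vmalloc fallback */

	if (buf && is_vmalloc_addr(buf))
		vm_unmap_aliases();	/* flush lazy vmap aliases before use */
	return buf;	/* caller frees with kvfree(), never plain kfree() */
}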