Exemplo n.º 1
0
/*
 * Walk the global packet list, scrub and release every packet, then
 * reset the accounting fields in rbu_data.  Each packet's page
 * allocation and its packet_data node are both freed.
 */
static void packet_empty_list(void)
{
	struct list_head *ptemp_list;
	struct list_head *pnext_list;
	struct packet_data *newpacket;

	/* Start at the first real node after the list head. */
	ptemp_list = (&packet_data_head.list)->next;
	/*
	 * NOTE(review): list_empty() is applied to the cursor node, not to
	 * the head.  The walk ends once the cursor wraps around to the (by
	 * then empty) head, whose next pointer refers back to itself.
	 * Quirky but order-dependent -- confirm before restructuring.
	 */
	while (!list_empty(ptemp_list)) {
		newpacket =
			list_entry(ptemp_list, struct packet_data, list);
		/* Remember the successor before unlinking the current node. */
		pnext_list = ptemp_list->next;
		list_del(ptemp_list);
		ptemp_list = pnext_list;
		/*
		 * zero out the RBU packet memory before freeing
		 * to make sure there are no stale RBU packets left in memory
		 */
		memset(newpacket->data, 0, rbu_data.packetsize);
#ifdef CONFIG_XEN
		/* Multi-page packets were made machine-contiguous at
		 * creation time; undo that before returning the pages. */
		if (newpacket->ordernum)
			xen_destroy_contiguous_region(
				(unsigned long)newpacket->data,
				newpacket->ordernum);
#endif

		free_pages((unsigned long) newpacket->data,
			newpacket->ordernum);
		kfree(newpacket);
	}
	/* No packets remain; clear the bookkeeping state. */
	rbu_data.packet_read_count = 0;
	rbu_data.num_packets = 0;
	rbu_data.imagesize = 0;
}
Exemplo n.º 2
0
/*
 * Slab destructor for skbuff data buffers.  The allocation order is
 * recovered by finding which per-order cache this object came from;
 * order-0 buffers were never made machine-contiguous, so only higher
 * orders need the Xen contiguous region torn down.
 */
static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int order;

	/* Locate this cache in the per-order cache table. */
	for (order = 0; skbuff_order_cachep[order] != cachep; order++)
		;

	if (order)
		xen_destroy_contiguous_region((unsigned long)buf, order);
}
Exemplo n.º 3
0
/*
 * Slab destructor for pgd objects.  Behavior depends on the paging
 * mode: under PAE (PTRS_PER_PMD > 1) the pgd may have been made
 * machine-contiguous and that region is torn down; otherwise the pgd
 * is removed from the global pgd list under pgd_lock and unpinned.
 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	if (PTRS_PER_PMD > 1) {
		/* PAE: tear down the contiguous region only when the
		 * hypervisor lacks pae_pgdir_above_4gb support. */
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
			xen_destroy_contiguous_region((unsigned long)pgd, 0);
	} else {
		/* Non-PAE: unlink from the global pgd list; irqsave
		 * because this path can run in interrupt context. */
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_del(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);

		/* Drop the Xen pin on this page-table page if held. */
		pgd_test_and_unpin(pgd);
	}
}
Exemplo n.º 4
0
/*
 * Allocate a new RBU packet of @length bytes, copy @data into it and
 * append it to the global packet list.
 *
 * Due to a BIOS erratum, packet memory must live at or above
 * allocation_floor (1MB).  Pages handed back below the floor are parked
 * in a temporary array so the allocator cannot return them again, and
 * are released once a suitable page has been found.
 *
 * Called with rbu_data.lock held; the lock is dropped around the
 * sleeping allocations and re-taken before return.
 *
 * Returns 0 on success or a negative errno (-EINVAL, -ENOMEM).
 */
static int create_packet(void *data, size_t length)
{
	struct packet_data *newpacket;
	int ordernum = 0;
	int retval = 0;
	unsigned int packet_array_size = 0;
	void **invalid_addr_packet_array = NULL;
	void *packet_data_temp_buf = NULL;
	unsigned int idx = 0;

	pr_debug("create_packet: entry \n");

	if (!rbu_data.packetsize) {
		pr_debug("create_packet: packetsize not specified\n");
		retval = -EINVAL;
		goto out_noalloc;
	}

	/* Drop the spinlock: the allocations below may sleep. */
	spin_unlock(&rbu_data.lock);

	newpacket = kzalloc(sizeof (struct packet_data), GFP_KERNEL);

	if (!newpacket) {
		printk(KERN_WARNING
			"dell_rbu:%s: failed to allocate new "
			"packet\n", __func__);
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_noalloc;
	}

	ordernum = get_order(length);

	/*
	 * BIOS errata mean we cannot allocate packets below 1MB or they will
	 * be overwritten by BIOS.
	 *
	 * array to temporarily hold packets
	 * that are below the allocation floor
	 *
	 * NOTE: very simplistic because we only need the floor to be at 1MB
	 *       due to BIOS errata. This shouldn't be used for higher floors
	 *       or you will run out of mem trying to allocate the array.
	 */
	packet_array_size = max(
	       		(unsigned int)(allocation_floor / rbu_data.packetsize),
			(unsigned int)1);
	invalid_addr_packet_array = kzalloc(packet_array_size * sizeof(void*),
						GFP_KERNEL);

	if (!invalid_addr_packet_array) {
		printk(KERN_WARNING
			"dell_rbu:%s: failed to allocate "
			"invalid_addr_packet_array \n",
			__func__);
		retval = -ENOMEM;
		spin_lock(&rbu_data.lock);
		goto out_alloc_packet;
	}

	/* Keep allocating until we get a page at or above the floor. */
	while (!packet_data_temp_buf) {
		packet_data_temp_buf = (unsigned char *)
			__get_free_pages(GFP_KERNEL, ordernum);
		if (!packet_data_temp_buf) {
			printk(KERN_WARNING
				"dell_rbu:%s: failed to allocate new "
				"packet\n", __func__);
			retval = -ENOMEM;
			spin_lock(&rbu_data.lock);
			goto out_alloc_packet_array;
		}
#ifdef CONFIG_XEN
		/* Multi-page packets must be machine-contiguous under Xen. */
		if (ordernum && xen_create_contiguous_region(
			(unsigned long)packet_data_temp_buf, ordernum, 0)) {
			free_pages((unsigned long)packet_data_temp_buf,
				   ordernum);
			printk(KERN_WARNING
				"dell_rbu:%s: failed to adjust new "
				"packet\n", __func__);
			retval = -ENOMEM;
			spin_lock(&rbu_data.lock);
			goto out_alloc_packet_array;
		}
#endif

		if ((unsigned long)virt_to_bus(packet_data_temp_buf)
				< allocation_floor) {
#ifdef CONFIG_XEN
			if (ordernum)
				xen_destroy_contiguous_region(
					(unsigned long)packet_data_temp_buf,
					ordernum);
#endif
			pr_debug("packet 0x%lx below floor at 0x%lx.\n",
					(unsigned long)virt_to_phys(
						packet_data_temp_buf),
					allocation_floor);
			/*
			 * BUGFIX: the stash only holds packet_array_size
			 * entries; the original code incremented idx without
			 * bound and could write past the end of the array.
			 * If the stash is full, give up instead of
			 * corrupting the heap.
			 */
			if (idx >= packet_array_size) {
				free_pages(
					(unsigned long)packet_data_temp_buf,
					ordernum);
				printk(KERN_WARNING
					"dell_rbu:%s: too many packets "
					"below allocation floor\n",
					__func__);
				retval = -ENOMEM;
				spin_lock(&rbu_data.lock);
				goto out_alloc_packet_array;
			}
			invalid_addr_packet_array[idx++] = packet_data_temp_buf;
			packet_data_temp_buf = NULL;
		}
	}
	spin_lock(&rbu_data.lock);

	newpacket->data = packet_data_temp_buf;

	pr_debug("create_packet: newpacket at physical addr %lx\n",
		(unsigned long)virt_to_bus(newpacket->data));

	/* packets may not have fixed size */
	newpacket->length = length;
	newpacket->ordernum = ordernum;
	++rbu_data.num_packets;

	/* initialize the newly created packet headers */
	INIT_LIST_HEAD(&newpacket->list);
	list_add_tail(&newpacket->list, &packet_data_head.list);

	memcpy(newpacket->data, data, length);

	pr_debug("create_packet: exit \n");

out_alloc_packet_array:
	/* always free packet array */
	for (;idx>0;idx--) {
		pr_debug("freeing unused packet below floor 0x%lx.\n",
			(unsigned long)virt_to_bus(
				invalid_addr_packet_array[idx-1]));
		free_pages((unsigned long)invalid_addr_packet_array[idx-1],
			ordernum);
	}
	kfree(invalid_addr_packet_array);

out_alloc_packet:
	/* if error, free data */
	if (retval)
		kfree(newpacket);

out_noalloc:
	return retval;
}
Exemplo n.º 5
0
/*
 * Release the reserved dom0 memory described by rsv_mm_info.
 *
 * @rsv_size: total reserved size; divided by SIZE_PER_BLOCK (2M) to get
 *            the number of blocks to free.
 *
 * Returns 0 on success, -1 if not a PV domain, -EINVAL if @rsv_size is
 * smaller than one block.
 */
static int
dom0_memory_free(uint32_t rsv_size)
{
	uint64_t vstart, vaddr;
	uint32_t i, num_block, size;

	if (!xen_pv_domain())
		return -1;

	/* each memory block is 2M */
	num_block = rsv_size / SIZE_PER_BLOCK;
	if (num_block == 0)
		return -EINVAL;

	/* free all memory blocks of size of 4M and destroy contiguous region */
	for (i = 0; i < dom0_dev.num_bigblock * 2; i += 2) {
		vstart = rsv_mm_info[i].vir_addr;
		if (vstart) {
		#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
			/* Pre-3.13 API takes a virtual address. */
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(vstart,
						DOM0_CONTIG_NUM_ORDER);
			if (rsv_mm_info[i + 1].exchange_flag)
				xen_destroy_contiguous_region(vstart +
						DOM0_MEMBLOCK_SIZE,
						DOM0_CONTIG_NUM_ORDER);
		#else
			/*
			 * 3.13+ API takes a physical address.
			 * NOTE(review): the second call reuses
			 * rsv_mm_info[i].pfn plus an offset rather than
			 * rsv_mm_info[i + 1].pfn -- valid only if the two
			 * halves of a big block are physically adjacent;
			 * confirm against the allocation path.
			 */
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(rsv_mm_info[i].pfn
					* PAGE_SIZE,
					DOM0_CONTIG_NUM_ORDER)
;
			if (rsv_mm_info[i + 1].exchange_flag)
				xen_destroy_contiguous_region(rsv_mm_info[i].pfn
					* PAGE_SIZE + DOM0_MEMBLOCK_SIZE,
					DOM0_CONTIG_NUM_ORDER);
		#endif

			/* Clear PG_reserved on every page of the 4M block
			 * so free_pages() will actually return them. */
			size = DOM0_MEMBLOCK_SIZE * 2;
			vaddr = vstart;
			while (size > 0) {
				ClearPageReserved(virt_to_page(vaddr));
				vaddr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			free_pages(vstart, MAX_NUM_ORDER);
		}
	}

	/* free all memory blocks size of 2M and destroy contiguous region */
	for (; i < num_block; i++) {
		vstart = rsv_mm_info[i].vir_addr;
		if (vstart) {
			if (rsv_mm_info[i].exchange_flag)
				xen_destroy_contiguous_region(vstart,
					DOM0_CONTIG_NUM_ORDER);

			/* Un-reserve each page of the 2M block before
			 * handing it back to the page allocator. */
			size = DOM0_MEMBLOCK_SIZE;
			vaddr = vstart;
			while (size > 0) {
				ClearPageReserved(virt_to_page(vaddr));
				vaddr += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
			free_pages(vstart, DOM0_CONTIG_NUM_ORDER);
		}
	}

	/* Scrub the bookkeeping records before releasing them. */
	memset(rsv_mm_info, 0, sizeof(struct memblock_info) * num_block);
	vfree(rsv_mm_info);
	rsv_mm_info = NULL;

	return 0;
}