Example 1
/**
 * flex_array_alloc - allocate a new flexible array
 * @element_size:	the size of individual elements in the array
 * @total:		total number of elements that this should hold
 * @flags:		page allocation flags to use for base array
 *
 * Note: all locking must be provided by the caller.
 *
 * @total is used to size internal structures.  If the user ever
 * accesses any array indexes >=@total, it will produce errors.
 *
 * The maximum number of elements is defined as: the number of
 * elements that can be stored in a page times the number of
 * page pointers that we can fit in the base structure or (using
 * integer math):
 *
 * 	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
 *
 * Here's a table showing example capacities.  Note that the maximum
 * index that the get/put() functions can handle is nr_objects-1.  This
 * basically means that you get 4MB of storage on 32-bit and 2MB on
 * 64-bit.
 *
 * Element size | Objects | Objects |
 * PAGE_SIZE=4k |  32-bit |  64-bit |
 * ---------------------------------|
 *      1 bytes | 4177920 | 2088960 |
 *      2 bytes | 2088960 | 1044480 |
 *      3 bytes | 1392300 |  696150 |
 *      4 bytes | 1044480 |  522240 |
 *     32 bytes |  130560 |   65408 |
 *     33 bytes |  126480 |   63240 |
 *   2048 bytes |    2040 |    1020 |
 *   2049 bytes |    1020 |     510 |
 *       void * | 1044480 |  261120 |
 *
 * Since 64-bit pointers are twice the size, we lose half the
 * capacity in the base structure.  Also note that no effort is made
 * to efficiently pack objects across page boundaries.
 */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
					gfp_t flags)
{
	struct flex_array *ret;
	int elems_per_part = 0;
	int reciprocal_elems = 0;
	int max_size = 0;

	if (element_size) {
		elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
		reciprocal_elems = reciprocal_value(elems_per_part);
		max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
	}

	/* max_size will end up 0 if element_size > PAGE_SIZE */
	if (total > max_size)
		return NULL;
	ret = kzalloc(sizeof(struct flex_array), flags);
	if (!ret)
		return NULL;
	ret->element_size = element_size;
	ret->total_nr_elements = total;
	ret->elems_per_part = elems_per_part;
	ret->reciprocal_elems = reciprocal_elems;
	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
		memset(&ret->parts[0], FLEX_ARRAY_FREE,
						FLEX_ARRAY_BASE_BYTES_LEFT);
	return ret;
}
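For context, a hedged sketch of how a caller might use this API (lib/flex_array was removed from recent kernels); store_samples(), the u32 payload, and the prealloc step are illustrative assumptions, not taken from the example above:

/* Hypothetical kernel-side caller: stash `count' u32 samples without one
 * large physically contiguous allocation.  Locking is the caller's job. */
static int store_samples(unsigned int count)
{
	struct flex_array *fa;
	unsigned int i;
	int err;

	fa = flex_array_alloc(sizeof(u32), count, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* Part pages are allocated lazily; preallocating them up front means
	 * flex_array_put() below should not fail with -ENOMEM. */
	err = flex_array_prealloc(fa, 0, count, GFP_KERNEL);
	if (err)
		goto out_free;

	for (i = 0; i < count; i++) {
		u32 val = i * i;

		err = flex_array_put(fa, i, &val, GFP_KERNEL);
		if (err)
			goto out_free;
	}
out_free:
	flex_array_free(fa);
	return err;
}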
Example 2
/**
 *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	/* read protection: ovsl_dereference() wraps rcu_dereference_protected() */
	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		/* free old's memory only after all of its readers have finished */
		call_rcu(&old->rcu, vport_portids_destroy_rcu_cb);

	return 0;
}
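The function above is the standard RCU publish-then-reclaim idiom: build the new object, publish it with rcu_assign_pointer(), and free the old one only after a grace period. A stripped-down sketch of the same pattern; all names (cfg, cfg_mutex, cfg_update) are hypothetical stand-ins for vport_portids and ovs_mutex:

/* Minimal sketch of the RCU update idiom, assuming a mutex serializes
 * updaters the way ovs_mutex does in the example above. */
struct cfg {
	struct rcu_head rcu;
	int val;
};

static struct cfg __rcu *active_cfg;
static DEFINE_MUTEX(cfg_mutex);

static void cfg_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct cfg, rcu));
}

static int cfg_update(int val)
{
	struct cfg *old, *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->val = val;

	mutex_lock(&cfg_mutex);
	old = rcu_dereference_protected(active_cfg,
					lockdep_is_held(&cfg_mutex));
	/* Publish: readers now see either the old or the new object. */
	rcu_assign_pointer(active_cfg, new);
	mutex_unlock(&cfg_mutex);

	/* Reclaim only after every pre-existing reader is done. */
	if (old)
		call_rcu(&old->rcu, cfg_free_rcu);
	return 0;
}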
Example 3
static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}
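Every example on this page caches a reciprocal so that later hot-path divisions become a multiply plus a shift. Here is a self-contained userspace sketch of the idea, modeled on the old u32-based lib/reciprocal_div.h (the kernel later switched to a struct reciprocal_value variant, in part because the u32 scheme is only exact for a bounded range of dividends):

/* Standalone illustration of the multiply-by-reciprocal trick; a sketch of
 * the old helpers, not the current kernel implementation. */
#include <assert.h>
#include <stdint.h>

/* Precompute ceil(2^32 / k) once, when the divisor k becomes known. */
static uint32_t reciprocal_value(uint32_t k)
{
	uint64_t val = (1ULL << 32) + (k - 1);

	return (uint32_t)(val / k);
}

/* a / k without a divide instruction: one 64-bit multiply and a shift. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t r = reciprocal_value(53);

	assert(reciprocal_divide(1000, r) == 1000 / 53);
	return 0;
}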
Example 4
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
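The pages_wraparound/vmap dance above maps the data pages twice, back to back, so a copy that crosses the ring's wrap point stays one linear memcpy. A hypothetical userspace demonstration of the same double-mapping trick (Linux-specific; assumes memfd_create(2) is available):

#define _GNU_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 4096;	/* one-page ring, for the demo */
	int fd = memfd_create("ring", 0);
	char *base;

	if (fd < 0 || ftruncate(fd, size) != 0)
		return 1;

	/* Reserve 2*size of address space, then map the buffer into both
	 * halves.  Offset N and offset size+N alias the same byte. */
	base = mmap(NULL, 2 * size, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;
	if (mmap(base, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
		return 1;
	if (mmap(base + size, size, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
		return 1;

	/* A write starting 3 bytes before the end wraps transparently:
	 * the trailing 'p' lands at offset 0 via the second mapping. */
	memcpy(base + size - 3, "wrap", 4);
	assert(base[0] == 'p');

	close(fd);
	return 0;
}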
Example 5
File: vport.c Project: JunoZhu/ovs
/**
 *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
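Note that this later revision of the same function reclaims the old array with kfree_rcu(old, rcu) instead of the call_rcu() + vport_portids_destroy_rcu_cb pair in Example 2. The two are interchangeable when the callback does nothing beyond freeing the enclosing structure; kfree_rcu() simply removes the need to write the callback by hand.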
Example 6
static int
nflash_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct nflash_mtd *nflash = (struct nflash_mtd *) mtd->priv;
	struct mtd_partition *part = NULL;
	int i, ret = 0;
	uint addr, len, blocksize;
	uint part_start_blk, part_end_blk;
	uint blknum, new_addr, erase_blknum;
	uint reciprocal_blocksize;

	addr = erase->addr;
	len = erase->len;

	blocksize = mtd->erasesize;
	reciprocal_blocksize = reciprocal_value(blocksize);

	/* Check address range */
	if (!len)
		return 0;

	if ((addr + len) > mtd->size)
		return -EINVAL;

	if (addr & (blocksize - 1))
		return -EINVAL;

	/* Locate the part */
	for (i = 0; nflash_parts[i].name; i++) {
		if (addr >= nflash_parts[i].offset &&
			((addr + len) <= (nflash_parts[i].offset + nflash_parts[i].size))) {
			part = &nflash_parts[i];
			break;
		}
	}

	if (!part)
		return -EINVAL;

	NFLASH_LOCK(nflash);

	/* Find the effective start block address to erase */
	part_start_blk = reciprocal_divide(part->offset & ~(blocksize-1),
		reciprocal_blocksize);
	part_end_blk = reciprocal_divide(((part->offset + part->size) + (blocksize-1)),
		reciprocal_blocksize);

	new_addr = part_start_blk * blocksize;
	/* The block number to be skipped relative to the start address of
	 * the MTD partition
	 */
	blknum = reciprocal_divide(addr - new_addr, reciprocal_blocksize);

	for (i = part_start_blk; (i < part_end_blk) && (blknum > 0); i++) {
		if (nflash->map[i] != 0) {
			new_addr += blocksize;
		} else {
			new_addr += blocksize;
			blknum--;
		}
	}

	/* Erase the blocks from the new block address */
	erase_blknum = reciprocal_divide(len + (blocksize-1), reciprocal_blocksize);

	if ((new_addr + (erase_blknum * blocksize)) > (part->offset + part->size)) {
		ret = -EINVAL;
		goto done;
	}

	for (i = new_addr; erase_blknum; i += blocksize) {
		/* Skip bad block erase */
		uint j = reciprocal_divide(i, reciprocal_blocksize);
		if (nflash->map[j] != 0) {
			continue;
		}

		if ((ret = hndnand_erase(nflash->nfl, i)) < 0) {
			hndnand_mark_badb(nflash->nfl, i);
			nflash->map[j] = 1;
		} else {
			erase_blknum--;
		}
	}

done:
	/* Set erase status */
	if (ret)
		erase->state = MTD_ERASE_FAILED;
	else
		erase->state = MTD_ERASE_DONE;

	NFLASH_UNLOCK(nflash);

	/* Call erase callback */
	if (erase->callback)
		erase->callback(erase);

	return ret;
}
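Worth noting: mtd->erasesize is normally a power of two, so every reciprocal_divide() above could equally be a shift; caching the reciprocal mainly sidesteps hardware division on cores that lack it. A hypothetical equivalent for the power-of-two case:

	/* Assuming blocksize is a power of two (typical for mtd->erasesize),
	 * the block-number math reduces to a shift by log2(blocksize). */
	uint blkshift = ffs(blocksize) - 1;	/* log2 of a power of two */

	part_start_blk = (part->offset & ~(blocksize - 1)) >> blkshift;
	part_end_blk = (part->offset + part->size + blocksize - 1) >> blkshift;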
Example 7
static int
nflash_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct nflash_mtd *nflash = (struct nflash_mtd *) mtd->priv;
	int bytes, ret = 0;
	struct mtd_partition *part = NULL;
	u_char *block = NULL;
	u_char *ptr = (u_char *)buf;
	uint offset, blocksize, mask, blk_offset, off;
	uint skip_bytes = 0, good_bytes = 0;
	int blk_idx, i;
	int read_len, write_len, copy_len = 0;
	loff_t from = to;
	u_char *write_ptr;
	int docopy = 1;
	uint r_blocksize, part_blk_start, part_blk_end;

	/* Locate the part */
	for (i = 0; nflash_parts[i].name; i++) {
		if (to >= nflash_parts[i].offset &&
			((nflash_parts[i+1].name == NULL) ||
			(to < (nflash_parts[i].offset + nflash_parts[i].size)))) {
			part = &nflash_parts[i];
			break;
		}
	}
	if (!part)
		return -EINVAL;
	/* Check address range */
	if (!len)
		return 0;
	if ((to + len) > (part->offset + part->size))
		return -EINVAL;
	offset = to;
	blocksize = mtd->erasesize;
	r_blocksize = reciprocal_value(blocksize);

	if (!(block = kmalloc(blocksize, GFP_KERNEL)))
		return -ENOMEM;

	NFLASH_LOCK(nflash);

	mask = blocksize - 1;
	/* Check and skip bad blocks */
	blk_offset = offset & ~mask;
	good_bytes = part->offset & ~mask;
	part_blk_start = reciprocal_divide(good_bytes, r_blocksize);
	part_blk_end = reciprocal_divide(part->offset + part->size, r_blocksize);

	for (blk_idx = part_blk_start;  blk_idx < part_blk_end; blk_idx++) {
		if (nflash->map[blk_idx] != 0) {
			skip_bytes += blocksize;
		} else {
			if (good_bytes == blk_offset)
				break;
			good_bytes += blocksize;
		}
	}
	if (blk_idx == part_blk_end) {
		ret = -EINVAL;
		goto done;
	}
	blk_offset = blocksize * blk_idx;
	/* Backup and erase one block at a time */
	*retlen = 0;
	while (len) {
		if (docopy) {
			/* Align offset */
			from = offset & ~mask;
			/* Copy existing data into holding block if necessary */
			if (((offset & (blocksize-1)) != 0) || (len < blocksize)) {
				ret = _nflash_mtd_read(mtd, part, from, blocksize,
					&read_len, block);
				if (ret)
					goto done;
				if (read_len != blocksize) {
					ret = -EINVAL;
					goto done;
				}
			}
			/* Copy input data into holding block */
			copy_len = min(len, blocksize - (offset & mask));
			memcpy(block + (offset & mask), ptr, copy_len);
		}
		off = (uint) from + skip_bytes;
		/* Erase block */
		if ((ret = hndnand_erase(nflash->nfl, off)) < 0) {
				hndnand_mark_badb(nflash->nfl, off);
				nflash->map[blk_idx] = 1;
				skip_bytes += blocksize;
				docopy = 0;
		} else {
			/* Write holding block */
			write_ptr = block;
			write_len = blocksize;
			while (write_len) {
				if ((bytes = hndnand_write(nflash->nfl,
						from + skip_bytes,
						(uint)write_len,
						(uchar *)write_ptr)) < 0) {
					hndnand_mark_badb(nflash->nfl, off);
					nflash->map[blk_idx] = 1;
					skip_bytes += blocksize;
					docopy = 0;
					break;
				}
				from += bytes;
				write_len -= bytes;
				write_ptr += bytes;
				docopy = 1;
			}
			if (docopy) {
				offset += copy_len;
				len -= copy_len;
				ptr += copy_len;
				*retlen += copy_len;
			}
		}
		/* Check and skip bad blocks */
		if (len) {
			blk_offset += blocksize;
			blk_idx++;
			while ((nflash->map[blk_idx] != 0) &&
			       (blk_offset < (part->offset+part->size))) {
				skip_bytes += blocksize;
				blk_offset += blocksize;
				blk_idx++;
			}
			if (blk_offset >= (part->offset+part->size)) {
				ret = -EINVAL;
				goto done;
			}
		}
	}
done:
	NFLASH_UNLOCK(nflash);

	if (block)
		kfree(block);
	return ret;
}
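Taken together, this write path is a classic NAND read-modify-write loop: when a write is unaligned or shorter than a block, the existing block is read into the holding buffer, the caller's bytes are merged in, the block is erased and rewritten, and any erase or write failure marks the block bad in nflash->map and retries the same data on the next good block.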