Example #1
File: qp.c  Project: 020gzh/linux
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type being created
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: allocation flags used when a new bitmap page is needed
 *
 * Return: The queue pair number on success, or a negative errno on failure
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map, gfp);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
	 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no bits at shift and below */
		WARN_ON(offset & (rdi->dparms.qos_shift - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
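
In the SMI/GSI fast path above, the per-port reservation is tracked in qpt->flags rather than in the bitmap: bit 2*(port_num-1) marks the SMI QP (QPN 0) and bit 2*(port_num-1)+1 marks the GSI QP (QPN 1). The following stand-alone check of that encoding is illustrative only and not part of the driver:

#include <stdio.h>

int main(void)
{
	/*
	 * Mirror the flag computation from alloc_qpn() above: ret is 0 for
	 * SMI (QPN 0) and 1 for GSI (QPN 1), and
	 * n = 1 << (ret + 2 * (port_num - 1)) selects the per-port flag bit.
	 */
	for (unsigned int port_num = 1; port_num <= 2; port_num++) {
		for (unsigned int ret = 0; ret <= 1; ret++) {
			unsigned int n = 1u << (ret + 2 * (port_num - 1));

			printf("port %u %s (QPN %u) -> flags bit 0x%x\n",
			       port_num, ret ? "GSI" : "SMI", ret, n);
		}
	}
	return 0;
}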
Example #2
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
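
The ipath allocator above has a matching release path that clears the QPN's bit and restores the per-page free count so the number can be reused. The following is a minimal sketch of that counterpart, reconstructed from the structures already used above (struct qpn_map, BITS_PER_PAGE, BITS_PER_PAGE_MASK, n_free); it is not a verbatim copy of the driver.

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	/*
	 * Locate the bitmap page covering this QPN, clear its bit, and
	 * account for the freed slot so alloc_qpn() can hand it out again.
	 */
	map = &qpt->map[qpn / BITS_PER_PAGE];
	clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}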
Example #3
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
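
For readers outside the kernel tree, the scheme shared by all three examples can be exercised in isolation: QPNs live in fixed-size bitmap pages, the allocator test-and-sets the first free bit after the last QPN handed out, existing pages are scanned (wrapping around) before the table grows, and QPN 0/1 stay reserved for SMI/GSI. The sketch below is illustrative user-space C under those assumptions; names such as qpn_pool and pool_alloc_qpn are hypothetical, and a plain byte-array bitmap stands in for the kernel's atomic bit operations.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define BITS_PER_PAGE	4096u
#define MAX_PAGES	8u
#define QPN_MAX		(BITS_PER_PAGE * MAX_PAGES)

struct qpn_pool {
	uint8_t *page[MAX_PAGES];	/* lazily allocated bitmap pages */
	unsigned int npages;		/* pages currently in the table */
	uint32_t last;			/* last QPN handed out */
};

/* Non-atomic stand-in for test_and_set_bit(): returns the old bit value. */
static int test_and_set(uint8_t *page, uint32_t bit)
{
	uint8_t mask = 1u << (bit & 7);
	int old = (page[bit >> 3] & mask) != 0;

	page[bit >> 3] |= mask;
	return old;
}

/* Allocate the lowest free QPN after p->last; scan existing pages first. */
static int pool_alloc_qpn(struct qpn_pool *p)
{
	uint32_t qpn = p->last + 1;
	uint32_t offset, page_idx;
	unsigned int scanned;

	if (qpn >= QPN_MAX)
		qpn = 0;	/* QPN 0/1 are pre-reserved bits, so 0 is safe */

	/* Phase 1: scan every already-allocated page, wrapping around. */
	for (scanned = 0; scanned < p->npages; scanned++) {
		page_idx = (qpn / BITS_PER_PAGE) % p->npages;
		for (offset = qpn % BITS_PER_PAGE; offset < BITS_PER_PAGE; offset++) {
			if (!test_and_set(p->page[page_idx], offset)) {
				p->last = page_idx * BITS_PER_PAGE + offset;
				return (int)p->last;
			}
		}
		/* Page full: move to the start of the next page (with wrap). */
		qpn = ((page_idx + 1) % p->npages) * BITS_PER_PAGE;
	}

	/* Phase 2: every existing page is full; grow the table if allowed. */
	if (p->npages == MAX_PAGES)
		return -1;
	p->page[p->npages] = calloc(BITS_PER_PAGE / 8, 1);
	if (!p->page[p->npages])
		return -1;
	offset = 0;
	if (p->npages == 0) {
		/* Reserve QPN 0 and 1 for SMI/GSI, as the drivers above do. */
		test_and_set(p->page[0], 0);
		test_and_set(p->page[0], 1);
		offset = 2;
	}
	test_and_set(p->page[p->npages], offset);
	p->last = p->npages * BITS_PER_PAGE + offset;
	p->npages++;
	return (int)p->last;
}

int main(void)
{
	struct qpn_pool p = { 0 };

	for (int i = 0; i < 5; i++)
		printf("allocated qpn %d\n", pool_alloc_qpn(&p));
	for (unsigned int i = 0; i < p.npages; i++)
		free(p.page[i]);
	return 0;
}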