/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table rather than keep a second one. Mark the bitmaps
         * for those QPNs here. The reserved range must be *after* the range
         * which verbs will pick from.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map, GFP_KERNEL);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
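/*
 * The init path above (and the allocators below) rely on get_map_page()
 * to populate bitmap pages on demand, but that helper is not part of
 * this listing. A minimal sketch of what it is assumed to do: allocate
 * one zeroed page and install it under qpt->lock, dropping the local
 * copy if another CPU won the race.
 */
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
                         gfp_t gfp)
{
        unsigned long page = get_zeroed_page(gfp);

        /* Free this page if someone installed one while we allocated. */
        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}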
/**
 * init_qpn_table - initialize the QP number table for a device
 * @dd: the hfi1 device data
 * @qpt: the QPN table
 */
static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
{
        u32 offset, qpn, i;
        struct qpn_map *map;
        int ret = 0;

        spin_lock_init(&qpt->lock);

        qpt->last = 0;
        qpt->incr = 1 << dd->qos_shift;

        /* ensure we don't assign QPs from the KDETH 64K window */
        qpn = kdeth_qp << 16;
        qpt->nmaps = qpn / BITS_PER_PAGE;
        /* This should always be zero */
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpt->nmaps];
        dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
                    qpn, qpn + 65535);
        for (i = 0; i < 65536; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
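/*
 * Standalone illustration (not driver code) of why the offset computed
 * above "should always be zero": assuming 4 KiB pages, BITS_PER_PAGE is
 * 4096 * 8 = 32768, and any QPN of the form kdeth_qp << 16 is a
 * multiple of 65536, hence of BITS_PER_PAGE. The kdeth_qp value here is
 * hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_BITS_PER_PAGE (4096 * 8)   /* 4 KiB pages assumed */

int main(void)
{
        unsigned int kdeth_qp = 0x80;           /* hypothetical prefix */
        unsigned int qpn = kdeth_qp << 16;      /* first reserved QPN */

        /* offset within the first reserved bitmap page is always 0 */
        assert((qpn & (DEMO_BITS_PER_PAGE - 1)) == 0);
        printf("reserve 0x%x..0x%x, skipping %u bitmap pages\n",
               qpn, qpn + 65535, qpn / DEMO_BITS_PER_PAGE);
        return 0;
}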
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @gfp: memory allocation flags
 *
 * Return: The queue pair number on success, negative errno on failure
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num, gfp_t gfp)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }

        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map, gfp);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >=
                         * BITS_PER_PAGE. That is OK; it gets re-assigned
                         * below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(rdi->dparms.qos_shift > 1 &&
                        offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
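/*
 * alloc_qpn() above recycles numbers by rescanning the bitmaps, which
 * only works if a release path clears the bit again. The actual free
 * routine is not shown in this listing; a minimal sketch of such a
 * counterpart:
 */
static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + qpn / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}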
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret = -1;

        if (type == IB_QPT_SMI)
                ret = 0;
        else if (type == IB_QPT_GSI)
                ret = 1;

        if (ret != -1) {
                map = &qpt->map[0];
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page)) {
                                ret = -ENOMEM;
                                goto bail;
                        }
                }
                if (!test_and_set_bit(ret, map->page))
                        atomic_dec(&map->n_free);
                else
                        ret = -EBUSY;
                goto bail;
        }

        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->n_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->n_free);
                                        qpt->last = qpn;
                                        ret = qpn;
                                        goto bail;
                                }
                                offset = find_next_offset(map, offset);
                                qpn = mk_qpn(qpt, map, offset);
                                /*
                                 * This test differs from alloc_pidmap().
                                 * If find_next_offset() does find a zero
                                 * bit, we don't need to check for QPN
                                 * wrapping around past our starting QPN.
                                 * We just need to be sure we don't loop
                                 * forever.
                                 */
                        } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                }
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
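/*
 * Two helpers used above are not part of this listing. Plausible
 * shapes, assuming the obvious (map, offset) <-> QPN mapping: mk_qpn()
 * converts a bitmap position back into a QPN, and find_next_offset()
 * skips to the next zero bit within a page.
 */
static inline u32 mk_qpn(struct ipath_qp_table *qpt, struct qpn_map *map,
                         u32 off)
{
        return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline u32 find_next_offset(struct qpn_map *map, u32 off)
{
        return find_next_zero_bit(map->page, BITS_PER_PAGE, off);
}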