int alloc_cpu_buffers(void)
{
	int i;
	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &cpu_buffer[i];

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_cpu_mode = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->cpu = i;
		INIT_WORK(&b->work, wq_sync_buffer, b);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
int alloc_cpu_buffers(void)
{
	int i;
	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
static void *fq_alloc_node(size_t sz, int node)
{
	void *ptr;

	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
	if (!ptr)
		ptr = vmalloc_node(sz, node);
	return ptr;
}
static void *repl_vmalloc_node(unsigned long size, int node)
{
	void *ret_val;

	ret_val = vmalloc_node(size, node);
	if (ret_val != NULL)
		klc_add_alloc(ret_val, size, stack_depth);
	return ret_val;
}
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
					table_size, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
	if (addr)
		return addr;

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		VM_BUG_ON(!slab_is_available());
		if (node_state(nid, N_HIGH_MEMORY)) {
			base = kmalloc_node(table_size,
					    GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = kmalloc(table_size, GFP_KERNEL | __GFP_NOWARN);
			if (!base)
				base = vmalloc(table_size);
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but
		 * address of memmap may be changed. So, we have to initialize
		 * again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check address of memmap is changed or not. */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
static void *__meminit alloc_page_cgroup(size_t size, int nid)
{
	void *addr = NULL;
	gfp_t flags = GFP_KERNEL | __GFP_NOWARN;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	if (node_state(nid, N_HIGH_MEMORY))
		addr = vmalloc_node(size, nid);
	else
		addr = vmalloc(size);

	return addr;
}
int alloc_cpu_buffers(void)
{
	int i;
	unsigned long buffer_size = fs_cpu_buffer_size;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &cpu_buffer[i];

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) /* SLES10, RHEL5 */
		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
					 cpu_to_node(i));
#else /* RHEL4 */
		b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
#endif
		if (!b->buffer)
			goto fail;

		b->last_task = NULL;
		b->last_cpu_mode = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->cpu = i;
#ifdef CONFIG_CA_CSS
		b->ca_css_interval = 0;
#endif
		INIT_WORK(&b->work, wq_sync_buffer, b);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
#ifdef CONFIG_RATELIMIT
	ring->rl_data.rate_index = 0;
	/* User_valid should be false in a rate_limit ring until the
	 * creation process of the ring is done, after the activation.
	 */
	if (queue_idx < priv->native_tx_ring_num)
		ring->rl_data.user_valid = true;
	else
		ring->rl_data.user_valid = false;
#endif
	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
	ring->inline_thold = min(inline_thold, MAX_INLINE);
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
#ifdef CONFIG_RATELIMIT
	if (queue_idx < priv->native_tx_ring_num)
		ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
					  M_WAITOK, &ring->tx_lock.m);
	else
		ring->br = buf_ring_alloc(size / 4, M_DEVBUF,
					  M_WAITOK, &ring->tx_lock.m);
#else
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
				  M_WAITOK, &ring->tx_lock.m);
#endif
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc_node(tmp, node);
	if (!ring->tx_info) {
		ring->tx_info = vmalloc(tmp);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
	kfree(ring);
	return err;
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain who's device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	gfp_t gfp;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
		return ERR_PTR(-EINVAL);

	/* GFP_NOIO is applicable to RC QP's only */
	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
	    init_attr->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
						GFP_NOIO : GFP_KERNEL;

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		if (gfp == GFP_NOIO)
			swq = __vmalloc(
				(init_attr->cap.max_send_wr + 1) * sz,
				gfp, PAGE_KERNEL);
		else
			swq = vmalloc_node(
				(init_attr->cap.max_send_wr + 1) * sz,
				rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);

		/*
		 * Driver needs to set up it's private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
		if (!priv)
			goto bail_qp;
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else if (gfp == GFP_NOIO)
				qp->r_rq.wq = __vmalloc(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						gfp, PAGE_KERNEL);
			else
				qp->r_rq.wq = vmalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num, gfp);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_reset_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and its good, now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
int efhw_iopages_alloc(struct pci_dev *pci_dev, struct efhw_iopages *p,
		       unsigned order, efhw_iommu_domain *vf_domain,
		       unsigned long iova_base)
{
	/* dma_alloc_coherent() is really the right interface to use here.
	 * However, it allocates memory "close" to the device, but we want
	 * memory on the current numa node.  Also we need the memory to be
	 * contiguous in the kernel, but not necessarily in physical
	 * memory.
	 *
	 * vf_domain is the IOMMU protection domain - it imples that pci_dev
	 * is a VF that should not use the normal DMA mapping APIs
	 */
	struct device *dev = &pci_dev->dev;
	int i = 0;

	p->n_pages = 1 << order;
	p->dma_addrs = kmalloc(p->n_pages * sizeof(p->dma_addrs[0]), 0);
	if (p->dma_addrs == NULL)
		goto fail1;
	p->ptr = vmalloc_node(p->n_pages << PAGE_SHIFT, -1);
	if (p->ptr == NULL)
		goto fail2;
	for (i = 0; i < p->n_pages; ++i) {
		struct page *page;
		page = vmalloc_to_page(p->ptr + (i << PAGE_SHIFT));

		if (!vf_domain) {
			p->dma_addrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
			if (dma_mapping_error(dev, p->dma_addrs[i])) {
				EFHW_ERR("%s: ERROR dma_map_page failed",
					 __FUNCTION__);
				goto fail3;
			}
		} else
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
		{
			int rc;

			p->dma_addrs[i] = iova_base;
			rc = iommu_map(vf_domain, p->dma_addrs[i],
				       page_to_phys(page), PAGE_SIZE,
				       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
			if (rc) {
				EFHW_ERR("%s: ERROR iommu_map failed (%d)",
					 __FUNCTION__, rc);
				goto fail3;
			}
			iova_base += PAGE_SIZE;
		}
#else
			EFRM_ASSERT(0);
#endif
	}
	return 0;

fail3:
	while (i-- > 0)
		if (!vf_domain) {
			dma_unmap_page(dev, p->dma_addrs[i],
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
		} else {
#ifdef CONFIG_SFC_RESOURCE_VF_IOMMU
			mutex_lock(&efrm_iommu_mutex);
			iommu_unmap(vf_domain, iova_base, PAGE_SIZE);
			mutex_unlock(&efrm_iommu_mutex);
#endif
		}
fail2:
	kfree(p->dma_addrs);
fail1:
	return -ENOMEM;
}
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @buf: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer_backend *bufb,
					    size_t size, size_t num_subbuf,
					    int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			     cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
						* num_subbuf_alloc,
						1 << INTERNODE_CACHE_SHIFT),
					  GFP_KERNEL | __GFP_NOWARN,
					  cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					    0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}
void *
cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
{
	return vmalloc_node(nr_bytes, cfs_cpt_spread_node(cptab, cpt));
}
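/*
 * A minimal sketch, not taken from any of the call sites above, of the
 * pattern most of them share: try a physically contiguous, node-local
 * allocation first and fall back to vmalloc_node() for large tables.
 * The function names here are hypothetical; the kernel APIs used
 * (kmalloc_node(), vmalloc_node(), kvfree()) are real.
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *table_alloc_node(size_t size, int node)
{
	void *p;

	/* Prefer physically contiguous memory on the requested NUMA node. */
	p = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, node);
	if (!p)
		/* Fall back to virtually contiguous, node-local pages. */
		p = vmalloc_node(size, node);
	return p;
}

static void table_free(void *p)
{
	kvfree(p);	/* frees both kmalloc()ed and vmalloc()ed memory */
}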