static int quicktest1(unsigned long arg) { struct gru_message_queue_desc mqd; void *p, *mq; unsigned long *dw; int i, ret = -EIO; char mes[GRU_CACHE_LINE_BYTES], *m; /* Need 1K cacheline aligned that does not cross page boundary */ p = kmalloc(4096, 0); if (p == NULL) return -ENOMEM; mq = ALIGNUP(p, 1024); memset(mes, 0xee, sizeof(mes)); dw = mq; gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0); for (i = 0; i < 6; i++) { mes[8] = i; do { ret = gru_send_message_gpa(&mqd, mes, sizeof(mes)); } while (ret == MQE_CONGESTION); if (ret) break; } if (ret != MQE_QUEUE_FULL || i != 4) { // printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n", ; goto done; } for (i = 0; i < 6; i++) { m = gru_get_next_message(&mqd); if (!m || m[8] != i) break; gru_free_message(&mqd, m); } if (i != 4) { // printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n", ; goto done; } ret = 0; done: kfree(p); return ret; }
/*
 * xpc_create_gru_mq_uv - allocate and activate a GRU message queue on UV.
 *
 * Allocates the mq descriptor pair, grabs node-local zeroed pages for the
 * queue itself, registers a GRU watchlist entry, wires up the MQ interrupt
 * to @cpu with @irq_handler, creates the GRU message queue, and finally
 * opens cross-partition access to the queue memory.
 *
 * @mq_size:     requested queue size in bytes (rounded up to a page order).
 * @cpu:         cpu whose node/blade hosts the queue and receives the irq.
 * @irq_name:    name registered for the irq.
 * @irq_handler: handler invoked when a GRU mq operation targets this queue.
 *
 * Returns the new xpc_gru_mq_uv on success, or an ERR_PTR() negative errno.
 * On failure every resource acquired so far is released via the reverse-order
 * goto-unwind chain below (out_6 .. out_0).
 */
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int nasid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	/* round the requested size up to a whole power-of-two page span */
	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	/* allocate the queue pages on the node that hosts @cpu */
	nid = cpu_to_node(cpu);
	page = __alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				  pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		/* request_irq() returns a negative errno; negate for display */
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));

	/* hand the MMR-derived vector/dest to the GRU so it can raise the irq */
	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				     nasid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}