static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}
static int quicktest(struct gru_state *gru)
{
	void *cb;
	void *ds;
	unsigned long *p;
	unsigned long word0, word1;	/* source and result of the GRU round trip */

	cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	p = ds;
	word0 = MAGIC;

	/* copy word0 into the data segment, then verify it arrived */
	gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();
	if (*p != MAGIC)
		BUG();

	/* copy it back out into word1 and check the round trip */
	gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (word0 != word1 || word0 != MAGIC) {
		printk("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
		       gru->gs_gid, word1, MAGIC);
		BUG();	/* ZZZ should not be fatal */
	}

	return 0;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU:%d quicktest3 error\n",
		       smp_processor_id());
		ret = -EIO;
	}
	return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					   XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	/* copy word0 into the data segment reservation (DSR) */
	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto done;

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n",
		       smp_processor_id(), *p);
		goto done;
	}

	/* copy the DSR contents back out into word1 */
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto done;

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       smp_processor_id(), word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	/* each CB zeroes its own 4-doubleword slice of buf */
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
			 XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	for (k = 0; k < numcb; k++) {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus == CBS_ACTIVE)
				continue;
			if (istatus == CBS_EXCEPTION)
				ret = -EFAULT;
			else if (buf[4 * i] || buf[4 * i + 1] ||
				 buf[4 * i + 2] || buf[4 * i + 3])
				ret = -EIO;	/* slice was not zeroed */
		}
	}
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}

	return ret;
}
/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = nasid >> 1;
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
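/*
 * Example (not from the original source): a minimal sketch of how a caller
 * might set up a GRU message queue with gru_create_message_queue(). The
 * wrapper name, the buffer size, and the nasid/vector/apicid values are
 * illustrative assumptions; real callers such as the XPC driver size and
 * allocate the queue according to their own requirements.
 */
static int example_setup_mq(struct gru_message_queue_desc *mqd,
			    int nasid, int vector, int apicid)
{
	/* a whole number of cache lines, leaving room for the 2-line header */
	unsigned int bytes = 64 * GRU_CACHE_LINE_BYTES;
	void *p;

	p = kmalloc(bytes, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* fills in *mqd, including mq_gpa = uv_gpa(p) for remote senders */
	return gru_create_message_queue(mqd, p, bytes, nasid, vector, apicid);
}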
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
			 XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
			   buf[4 * i + 3]) {
			printk(KERN_DEBUG
			       "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i],
			       buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
/*
 * Convert a virtual memory address to a global physical memory address (gpa).
 */
static unsigned long
xp_pa_uv(void *addr)
{
	return uv_gpa(addr);
}
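/*
 * Example (not from the original source): a minimal sketch showing how the
 * gpa produced by xp_pa_uv()/uv_gpa() is consumed. xp_remote_memcpy() takes
 * its destination and source as global physical addresses, as in the
 * descriptor-cache and heartbeat routines above. The function name and
 * parameters here are illustrative assumptions.
 */
static enum xp_retval
example_pull_remote_uv(void *local_buf, unsigned long remote_gpa, size_t len)
{
	/* destination is our local buffer, expressed as a gpa */
	return xp_remote_memcpy(uv_gpa(local_buf), remote_gpa, len);
}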