/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
						   PAGE_SIZE,
						   SN_MEMPROT_ACCESS_CLASS_1,
						   &nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
	       XP_MAX_PARTITIONS);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
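/*
 * Illustrative sketch (not part of the driver): how a remote partition can
 * tell that the reserved page above has been initialized.  Once
 * xpc_rsvd_page_init() stores __pa(xpc_vars) into rp->vars_pa, a non-zero
 * vars_pa read across partitions is the "we are alive" signal referred to in
 * the comment at the top of the function.  The helper name
 * xpc_remote_rp_initialized() is hypothetical; the real discovery path lives
 * in the remote-partition handling code.
 */
static inline int
xpc_remote_rp_initialized(struct xpc_rsvd_page *remote_rp)
{
	/* a vars_pa of zero means the remote XPC has not set up its page */
	return remote_rp->vars_pa != 0;
}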
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
int
xpc_setup_rsvd_page(void)
{
	int ret;
	struct xpc_rsvd_page *rp;
	unsigned long rp_pa;
	unsigned long new_ts_jiffies;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return -ESRCH;
	}
	rp = (struct xpc_rsvd_page *)__va(rp_pa);

	if (rp->SAL_version < 3) {
		/* SAL_versions < 3 had a SAL_partid defined as a u8 */
		rp->SAL_partid &= 0xff;
	}
	BUG_ON(rp->SAL_partid != xp_partition_id);

	if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) {
		dev_err(xpc_part, "the reserved page's partid of %d is outside "
			"supported range (< 0 || >= %d)\n", rp->SAL_partid,
			xp_max_npartitions);
		return -EINVAL;
	}

	rp->version = XPC_RP_VERSION;
	rp->max_npartitions = xp_max_npartitions;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->SAL_nasids_size = 128;
	}
	xpc_nasid_mask_nbytes = rp->SAL_nasids_size;
	xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size *
					      BITS_PER_BYTE);

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);

	ret = xpc_setup_rsvd_page_sn(rp);
	if (ret != 0)
		return ret;

	/*
	 * Set timestamp of when reserved page was setup by XPC.
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	new_ts_jiffies = jiffies;
	if (new_ts_jiffies == 0 || new_ts_jiffies == rp->ts_jiffies)
		new_ts_jiffies++;
	rp->ts_jiffies = new_ts_jiffies;

	xpc_rsvd_page = rp;
	return 0;
}
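/*
 * Illustrative sketch (not part of the driver): why ts_jiffies is bumped
 * above when it would collide with zero or with the previous value.  A
 * remote partition that cached our last timestamp can detect a reload of
 * XPC simply by comparing timestamps; if a re-setup happened to land on the
 * same jiffies value (or on zero, which reads as "not initialized"), the
 * change would be invisible to it.  The helper name and the
 * cached_ts_jiffies parameter below are hypothetical.
 */
static inline int
xpc_remote_rp_was_reinitialized(struct xpc_rsvd_page *remote_rp,
				unsigned long cached_ts_jiffies)
{
	/* zero means the remote reserved page is currently torn down */
	if (remote_rp->ts_jiffies == 0)
		return 0;

	/* any other change in the stamp indicates a fresh setup */
	return remote_rp->ts_jiffies != cached_ts_jiffies;
}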