static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
	unsigned long vector;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);

	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		ia64_eoi();
		vector = ia64_get_ivr();
	}

	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}
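/*
 * For context, a minimal sketch of how the two resource objects that
 * register_memory() fills in are typically declared elsewhere in the same
 * file. The names match the code above, but the .name strings and flags
 * below are assumptions based on common kernel usage, not quoted from the
 * original source.
 */
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};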
/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid)
{
	bte_result_t bte_res;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;
	u64 buf = buf;		/* self-init silences the "uninitialized" warning;
				 * SAL ignores buf until it requests more passes */
	u64 buf_len = 0;
	void *buf_base = NULL;

	while (1) {
		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
						       &len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES)
			break;

		if (L1_CACHE_ALIGN(len) > buf_len) {
			if (buf_base != NULL)
				kfree(buf_base);
			buf_len = L1_CACHE_ALIGN(len);
			buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
							GFP_KERNEL, &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				status = SALRET_ERROR;
				break;
			}
		}

		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
				      (BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bte_res != BTE_SUCCESS) {
			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
			status = SALRET_ERROR;
			break;
		}
	}

	if (buf_base != NULL)
		kfree(buf_base);

	if (status != SALRET_OK)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.
 * For SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs. */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp),
				       ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
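/*
 * Illustrative sketch only (the helper name is hypothetical): on ia64 a C
 * function pointer refers to a function descriptor holding the entry point
 * and the global pointer, which is why init_smp_config() above reads
 * start_ap through a struct and converts both fields with ia64_tpa()
 * before handing them to SAL.
 */
static unsigned long phys_entry_of(void *func_desc)
{
	struct fptr {
		unsigned long fp;	/* code entry point (virtual) */
		unsigned long gp;	/* global pointer (virtual) */
	} *desc = func_desc;

	return ia64_tpa(desc->fp);	/* SAL expects a physical address */
}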
/*
 * It's the user's responsibility to call the PAL procedure on a specific
 * processor. The cpu number in the driver is only used for storing data.
 */
static ssize_t store_call_start(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	unsigned int cpu = dev->id;
	unsigned long call_start = simple_strtoull(buf, NULL, 16);

#ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
	       err_data_buffer[cpu].data1,
	       err_data_buffer[cpu].data2,
	       err_data_buffer[cpu].data3);
#endif
	switch (call_start) {
	case 0: /* Do nothing. */
		break;
	case 1: /* Call pal_mc_error_inject in physical mode. */
		status[cpu] = ia64_pal_mc_error_inject_phys(err_type_info[cpu],
					err_struct_info[cpu],
					ia64_tpa(&err_data_buffer[cpu]),
					&capabilities[cpu],
					&resources[cpu]);
		break;
	case 2: /* Call pal_mc_error_inject in virtual mode. */
		status[cpu] = ia64_pal_mc_error_inject_virt(err_type_info[cpu],
					err_struct_info[cpu],
					ia64_tpa(&err_data_buffer[cpu]),
					&capabilities[cpu],
					&resources[cpu]);
		break;
	default:
		status[cpu] = -EINVAL;
		break;
	}

#ifdef ERR_INJ_DEBUG
	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
#endif
	return size;
}
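/*
 * A sketch of how a store handler such as store_call_start() is commonly
 * exposed through sysfs. The original driver may wire this up differently
 * (for instance, pairing it with a show routine as a read/write attribute);
 * the attribute below is only an assumed example.
 */
static DEVICE_ATTR(call_start, S_IWUSR, NULL, store_call_start);

/* The attribute would then be attached to each CPU's device at init time,
 * e.g. with device_create_file(dev, &dev_attr_call_start). */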
static int machine_kexec_get_xenheap(xen_kexec_range_t *range)
{
	range->start = (ia64_tpa(_end) + (ELF_PAGE_SIZE - 1)) & ELF_PAGE_MASK;
	range->size = (((unsigned long)range->start + KERNEL_TR_PAGE_SIZE) &
		       ~(KERNEL_TR_PAGE_SIZE - 1)) -
		      (unsigned long)range->start;
	return 0;
}
void
ia64_mca_init(void)
{
	struct ia64_sal_result result;
	uint64_t max_size;
	char *p;
	int i;

	/*
	 * Get the sizes of the state information we can get from SAL and
	 * allocate a common block (forgive me my Fortran :-) for use by
	 * support functions. We create a region 7 address to make it
	 * easy on the OS_MCA or OS_INIT handlers to get the state info
	 * under unreliable conditions.
	 */
	max_size = 0;
	for (i = 0; i < SAL_INFO_TYPES; i++) {
		result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0, 0,
		    0, 0, 0);
		if (result.sal_status == 0) {
			mca_info_size[i] = result.sal_result[0];
			if (mca_info_size[i] > max_size)
				max_size = mca_info_size[i];
		} else
			mca_info_size[i] = -1;
	}
	max_size = round_page(max_size);
	p = contigmalloc(max_size, M_TEMP, M_WAITOK, 0ul,
	    256*1024*1024 - 1, PAGE_SIZE, 256*1024*1024);
	mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p));
	if (bootverbose)
		printf("MCA: allocated %ld bytes for state information\n",
		    max_size);

	/*
	 * Initialize the spin lock used to protect the info block. When APs
	 * get launched, there's a short moment of contention, but in all
	 * other cases it's not a hot spot. I think it's possible to have the
	 * MCA handler be called on multiple processors at the same time, but
	 * that should be rare. On top of that, performance is not an issue
	 * when dealing with machine checks...
	 */
	mtx_init(&mca_info_block_lock, "MCA spin lock", NULL, MTX_SPIN);

	/*
	 * Get and save any processor and platform error records. Note that
	 * in an SMP configuration the processor records are for the BSP only.
	 * We let the APs get and save their own records when we wake them up.
	 */
	for (i = 0; i < SAL_INFO_TYPES; i++)
		ia64_mca_save_state(i);
}
/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size)
{
	bte_result_t bte_res;
	s64 status;
	u64 cookie = 0;
	u64 rp_pa = nasid;	/* seed with nasid */
	u64 len = 0;

	while (1) {
		status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
						       &len);

		dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
			"0x%016lx, address=0x%016lx, len=0x%016lx\n",
			status, cookie, rp_pa, len);

		if (status != SALRET_MORE_PASSES)
			break;

		if (len > buf_size) {
			dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len);
			status = SALRET_ERROR;
			break;
		}

		bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size,
				      (BTE_NOTIFY | BTE_WACQUIRE), NULL);
		if (bte_res != BTE_SUCCESS) {
			dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
			status = SALRET_ERROR;
			break;
		}
	}

	if (status != SALRET_OK)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
			  char **ssdt)
{
	struct ia64_sal_retval ret_stuff;
	u64 busnum;
	u64 segment;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	segment = soft->pbi_buscommon.bs_persist_segment;
	busnum = soft->pbi_buscommon.bs_persist_busnum;
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
			busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
			0, 0);

	return (int)ret_stuff.v0;
}
static ssize_t store_virtual_to_phys(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned int cpu = dev->id;
	u64 virt_addr = simple_strtoull(buf, NULL, 16);
	int ret;

	ret = get_user_pages(current, current->mm, virt_addr,
			     1, VM_READ, 0, NULL, NULL);
	if (ret <= 0) {
#ifdef ERR_INJ_DEBUG
		printk("Virtual address %lx does not exist.\n", virt_addr);
#endif
		return -EINVAL;
	}

	phys_addr[cpu] = ia64_tpa(virt_addr);
	return size;
}
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	preempt_disable();
	rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
	preempt_enable();
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/* establish the actual sizes of the nasid masks */
	if (rp->SAL_version == 1) {
		/* SAL_version 1 didn't set the nasids_size field */
		rp->nasids_size = 128;
	}
	xp_nasid_mask_bytes = rp->nasids_size;
	xp_nasid_mask_words = xp_nasid_mask_bytes / 8;

	/* setup the pointers to the various items in the reserved page */
	xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
	xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
	xpc_vars = XPC_RP_VARS(rp);
	xpc_vars_part = XPC_RP_VARS_PART(rp);

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				uncached_free_page(__IA64_UNCACHED_OFFSET |
						   TO_PHYS((u64) amos_page));
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	/* clear xpc_vars */
	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
					XP_MAX_PARTITIONS);

	/* initialize the activate IRQ related AMO variables */
	for (i = 0; i < xp_nasid_mask_words; i++) {
		(void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
	}

	/* initialize the engaged remote partitions related AMO variables */
	(void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
	(void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);

	/* timestamp of when reserved page was setup by XPC */
	rp->stamp = CURRENT_TIME;

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
	struct xpc_rsvd_page *rp;
	AMO_t *amos_page;
	u64 rp_pa, next_cl, nasid_array = 0;
	int i, ret;

	/* get the local reserved page's address */

	rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0),
				     (u64) xpc_remote_copy_buffer,
				     XPC_RSVD_PAGE_ALIGNED_SIZE);
	if (rp_pa == 0) {
		dev_err(xpc_part, "SAL failed to locate the reserved page\n");
		return NULL;
	}
	rp = (struct xpc_rsvd_page *) __va(rp_pa);

	if (rp->partid != sn_partition_id) {
		dev_err(xpc_part, "the reserved page's partid of %d should be "
			"%d\n", rp->partid, sn_partition_id);
		return NULL;
	}

	rp->version = XPC_RP_VERSION;

	/*
	 * Place the XPC variables on the cache line following the
	 * reserved page structure.
	 */
	next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE;
	xpc_vars = (struct xpc_vars *) next_cl;

	/*
	 * Before clearing xpc_vars, see if a page of AMOs had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition AMOs are allowed.
	 *
	 * The allocated AMO page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This AMO page is never freed, and its
	 * memory protections are never restricted.
	 */
	if ((amos_page = xpc_vars->amos_page) == NULL) {
		amos_page = (AMO_t *) mspec_kalloc_page(0);
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of AMOs\n");
			return NULL;
		}

		/*
		 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
		 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
		 */
		if (!enable_shub_wars_1_1()) {
			ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
					PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
					&nasid_array);
			if (ret != 0) {
				dev_err(xpc_part, "can't change memory "
					"protections\n");
				mspec_kfree_page((unsigned long) amos_page);
				return NULL;
			}
		}
	} else if (!IS_AMO_ADDRESS((u64) amos_page)) {
		/*
		 * EFI's XPBOOT can also set amos_page in the reserved page,
		 * but it happens to leave it as an uncached physical address
		 * and we need it to be an uncached virtual, so we'll have to
		 * convert it.
		 */
		if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
			dev_err(xpc_part, "previously used amos_page address "
				"is bad = 0x%p\n", (void *) amos_page);
			return NULL;
		}
		amos_page = (AMO_t *) TO_AMO((u64) amos_page);
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars));

	/*
	 * Place the XPC per partition specific variables on the cache line
	 * following the XPC variables structure.
	 */
	next_cl += XPC_VARS_ALIGNED_SIZE;
	memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) *
					XP_MAX_PARTITIONS);
	xpc_vars_part = (struct xpc_vars_part *) next_cl;
	xpc_vars->vars_part_pa = __pa(next_cl);

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->act_nasid = cpuid_to_nasid(0);
	xpc_vars->act_phys_cpuid = cpu_physical_id(0);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/*
	 * Initialize the activation related AMO variables.
	 */
	xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS);
	for (i = 1; i < XP_NASID_MASK_WORDS; i++) {
		xpc_IPI_init(i + XP_MAX_PARTITIONS);
	}

	/* export AMO page's physical address to other partitions */
	xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page);

	/*
	 * This signifies to the remote partition that our reserved
	 * page is initialized.
	 */
	rp->vars_pa = __pa(xpc_vars);

	return rp;
}
void
ia64_sal_init(void)
{
	static int sizes[6] = { 48, 32, 16, 32, 16, 16 };
	u_int8_t *p;
	int i;

	sal_systbl = efi_get_table(&sal_table);
	if (sal_systbl == NULL)
		return;

	if (memcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) {
		printf("Bad signature for SAL System Table\n");
		return;
	}

	p = (u_int8_t *) (sal_systbl + 1);
	for (i = 0; i < sal_systbl->sal_entry_count; i++) {
		switch (*p) {
		case 0: {
			struct sal_entrypoint_descriptor *dp;

			dp = (struct sal_entrypoint_descriptor*)p;
			ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc);
			if (bootverbose)
				printf("PAL Proc at 0x%lx\n", ia64_pal_entry);
			sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc);
			sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp);
			if (bootverbose)
				printf("SAL Proc at 0x%lx, GP at 0x%lx\n",
				    sal_fdesc.func, sal_fdesc.gp);
			ia64_sal_entry = (sal_entry_t *) &sal_fdesc;
			break;
		}
		case 5: {
			struct sal_ap_wakeup_descriptor *dp;
#ifdef SMP
			struct ia64_sal_result result;
			struct ia64_fdesc *fd;
#endif

			dp = (struct sal_ap_wakeup_descriptor*)p;
			if (dp->sale_mechanism != 0) {
				printf("SAL: unsupported AP wake-up mechanism "
				    "(%d)\n", dp->sale_mechanism);
				break;
			}
			if (dp->sale_vector < 0x10 || dp->sale_vector > 0xff) {
				printf("SAL: invalid AP wake-up vector "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			/*
			 * SAL documents that the wake-up vector should be
			 * high (close to 255). The MCA rendezvous vector
			 * should be less than the wake-up vector, but still
			 * "high". We use the following priority assignment:
			 *	Wake-up:	priority of the sale_vector
			 *	Rendezvous:	priority-1
			 *	Generic IPIs:	priority-2
			 *	Special IPIs:	priority-3
			 * Consequently, the wake-up priority should be at
			 * least 4 (ie vector >= 0x40).
			 */
			if (dp->sale_vector < 0x40) {
				printf("SAL: AP wake-up vector too low "
				    "(0x%lx)\n", dp->sale_vector);
				break;
			}

			if (bootverbose)
				printf("SAL: AP wake-up vector: 0x%lx\n",
				    dp->sale_vector);

			ipi_vector[IPI_AP_WAKEUP] = dp->sale_vector;
			setup_ipi_vectors(dp->sale_vector & 0xf0);

#ifdef SMP
			fd = (struct ia64_fdesc *) os_boot_rendez;
			result = ia64_sal_entry(SAL_SET_VECTORS,
			    SAL_OS_BOOT_RENDEZ, ia64_tpa(fd->func),
			    ia64_tpa(fd->gp), 0, 0, 0, 0);
#endif
			break;
		}
		}
		p += sizes[*p];
	}

	if (ipi_vector[IPI_AP_WAKEUP] == 0)
		setup_ipi_vectors(0xf0);
}
/* Translate virtual address to physical address. */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
	struct page *page;
	struct vm_area_struct *vma;

	if (vaddr == 0)
		return 0UL;

	if (REGION_NUMBER(vaddr) == 5) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *ptep;

		/* On ia64, TASK_SIZE refers to current. It is not initialized
		   during boot. Furthermore the kernel is relocatable and
		   __pa() doesn't work on such addresses. */
		if (vaddr >= KERNEL_START &&
		    vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
			return vaddr - kernel_virtual_offset;

		/* In kernel area -- virtually mapped. */
		pgd = pgd_offset_k(vaddr);
		if (pgd_none(*pgd) || pgd_bad(*pgd))
			return ~0UL;

		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud) || pud_bad(*pud))
			return ~0UL;

		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return ~0UL;

		ptep = pte_offset_kernel(pmd, vaddr);
		if (!ptep)
			return ~0UL;

		return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
	}

	if (vaddr > TASK_SIZE) {
		/* percpu variables */
		if (REGION_NUMBER(vaddr) == 7 &&
		    REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
			return ia64_tpa(vaddr);

		/* kernel address */
		return __pa(vaddr);
	}

	vma = find_extend_vma(current->mm, vaddr);
	if (!vma)
		return ~0UL;

	/* We assume the page is modified. */
	page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
	if (!page)
		return ~0UL;

	return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
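/*
 * Usage sketch (hypothetical caller, not from the source): xencomm_vtop()
 * returns ~0UL when no translation exists, so callers must treat that value
 * as an error rather than a physical address.
 */
static int xencomm_map_one(unsigned long vaddr, unsigned long *paddr)
{
	unsigned long pa = xencomm_vtop(vaddr);

	if (pa == ~0UL)
		return -EINVAL;

	*paddr = pa;
	return 0;
}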
static int machine_kexec_get_xen(xen_kexec_range_t *range)
{
	range->start = ia64_tpa(_text);
	range->size = (unsigned long)_end - (unsigned long)_text;
	return 0;
}
/*
 * cpu_init() initializes state that is per-CPU. This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu(). We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes. This ensures that we can handle
	 * TLB-misses to user address-space even before we created the
	 * first user address-space. This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred. The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses). Turn on
	 * dcr.lc as per recommendation by the architecture team. Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX |
				       IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD |
				       IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

	platform_cpu_init();
	pm_idle = default_idle;
}
unsigned long paddr_vmcoreinfo_note(void)
{
	return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
}
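/*
 * Why ia64_tpa() rather than __pa() here: __pa() only strips the region-7
 * identity-map offset, while ia64_tpa() performs a hardware "tpa"
 * translation and so also works for symbols in the kernel image mapping,
 * such as vmcoreinfo_note. A minimal, hypothetical helper capturing the
 * same idiom:
 */
static inline unsigned long kernel_sym_to_phys(void *sym)
{
	return ia64_tpa((unsigned long)sym);
}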
void
xencomm_initialize(void)
{
	kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
	is_xencomm_initialized = 1;
}