Example #1
void
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	printk(KERN_INFO "CPU %d: %lu virtual and %lu physical address bits\n",
	       smp_processor_id(), impl_va_msb + 1, phys_addr_size);
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
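
A minimal standalone sketch of the union-overlay decoding used above: the raw CPUID words land in bits[] and are then read back through the field struct. The register values here are invented, ia64_get_cpuid() is replaced by plain assignments, and the bitfield layout assumes a little-endian LP64 compiler (as GCC is on ia64).

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	union {
		uint64_t bits[5];
		struct {
			char vendor[16];	/* CPUID regs 0 & 1 */
			uint64_t ppn;		/* CPUID reg 2: serial number */
			unsigned number		:  8;	/* CPUID reg 3, low byte */
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;
			uint64_t features;	/* CPUID reg 4 */
		} field;
	} cpuid;

	memset(&cpuid, 0, sizeof(cpuid));

	/* Stand-ins for ia64_get_cpuid(0..4); all values are made up. */
	memcpy(cpuid.field.vendor, "GenuineIntel", 12);
	cpuid.bits[2] = 0x12345678;	/* invented serial number */
	cpuid.bits[3] = (0x1fUL << 24) | (0x02UL << 16) | (0x07UL << 8);
	cpuid.bits[4] = 0x1;		/* invented feature bit */

	/* family/model/revision come back out through the bitfields */
	printf("vendor=%s family=0x%x model=%u revision=%u\n",
	       cpuid.field.vendor, cpuid.field.family,
	       cpuid.field.model, cpuid.field.revision);
	return 0;
}
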
Example #2
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier': nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize default control register to defer all speculative faults.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
	platform_cpu_init();
}
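
The max_ctx update near the end of cpu_init() is a lock-free "lower the shared maximum" loop: each CPU retries cmpxchg until either its smaller value lands or another CPU has already stored one at least as small. Below is a sketch of the same pattern in portable C11; shared_max_ctx and lower_max_ctx are illustrative names, not kernel symbols.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int shared_max_ctx = (unsigned int)-1;

static void lower_max_ctx(unsigned int local_max)
{
	unsigned int old = atomic_load(&shared_max_ctx);

	/* A failed compare-exchange reloads 'old', so the loop also stops
	 * as soon as another thread has installed a value <= local_max. */
	while (local_max < old &&
	       !atomic_compare_exchange_weak(&shared_max_ctx, &old, local_max))
		;
}

int main(void)
{
	lower_max_ctx((1u << 15) - 1);	/* a CPU reporting 18 RID bits */
	printf("max_ctx = %u\n", atomic_load(&shared_max_ctx));	/* 32767 */
	return 0;
}
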
Example #3
static int
tr_info(char *page)
{
	char *p = page;
	long status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	unsigned long i, j;
	unsigned long max[3], pgm;
	struct ifa_reg {
		unsigned long valid:1;
		unsigned long ig:11;
		unsigned long vpn:52;
	} *ifa_reg;
	struct itir_reg {
		unsigned long rv1:2;
		unsigned long ps:6;
		unsigned long key:24;
		unsigned long rv2:32;
	} *itir_reg;
	struct gr_reg {
		unsigned long p:1;
		unsigned long rv1:1;
		unsigned long ma:3;
		unsigned long a:1;
		unsigned long d:1;
		unsigned long pl:2;
		unsigned long ar:3;
		unsigned long ppn:38;
		unsigned long rv2:2;
		unsigned long ed:1;
		unsigned long ig:11;
	} *gr_reg;
	struct rid_reg {
		unsigned long ig1:1;
		unsigned long rv1:1;
		unsigned long ig2:6;
		unsigned long rid:24;
		unsigned long rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < max[i]; j++) {
			status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
			if (status != 0) {
				printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
				       i, j, status);
				continue;
			}

			ifa_reg = (struct ifa_reg *)&tr_buffer[2];
			if (ifa_reg->valid == 0)
				continue;

			gr_reg   = (struct gr_reg *)tr_buffer;
			itir_reg = (struct itir_reg *)&tr_buffer[1];
			rid_reg  = (struct rid_reg *)&tr_buffer[3];

			/* mask off the sub-page bits of ppn/vpn (see sketch below) */
			pgm = -1UL << (itir_reg->ps - 12);
			p += sprintf(p,
				     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
				     "\tppn  : 0x%lx\n"
				     "\tvpn  : 0x%lx\n"
				     "\tps   : ",
				     "ID"[i], j,
				     tr_valid.pal_tr_valid_s.access_rights_valid,
				     tr_valid.pal_tr_valid_s.priv_level_valid,
				     tr_valid.pal_tr_valid_s.dirty_bit_valid,
				     tr_valid.pal_tr_valid_s.mem_attr_valid,
				     (gr_reg->ppn & pgm) << 12, (ifa_reg->vpn & pgm) << 12);

			p = bitvector_process(p, 1UL << itir_reg->ps);

			p += sprintf(p,
				     "\n\tpl   : %d\n"
				     "\tar   : %d\n"
				     "\trid  : %x\n"
				     "\tp    : %d\n"
				     "\tma   : %d\n"
				     "\td    : %d\n",
				     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
				     gr_reg->d);
		}
	}
	return p - page;
}
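
The page-mask step in tr_info() deserves a worked example: ppn and vpn in the TR dump count 4KB units, so for a 2^ps-byte page the low (ps - 12) bits must be cleared before shifting back to a byte address. The sketch below restates it with example values, using an unsigned ~0UL shift that yields the same mask as -1 << (ps - 12) without relying on signed-shift behavior.

#include <stdio.h>

int main(void)
{
	unsigned long ps = 16;			/* a 64KB page, for example */
	unsigned long ppn = 0x12345;		/* illustrative 4KB-unit PPN */
	unsigned long pgm = ~0UL << (ps - 12);	/* drop the sub-page PPN bits */

	/* prints "page base = 0x12340000" */
	printf("page base = 0x%lx\n", (ppn & pgm) << 12);
	return 0;
}
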
Example #4
static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages = 0, vw_pages = 0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) != 0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		p += sprintf(p,
		     "Physical Address Space         : %d bits\n"
		     "Virtual Address Space          : %d bits\n"
		     "Protection Key Registers(PKR)  : %d\n"
		     "Implemented bits in PKR.key    : %d\n"
		     "Hash Tag ID                    : 0x%x\n"
		     "Size of RR.rid                 : %d\n"
		     "Max Purges                     : ",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size,
		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			p += sprintf(p, "unlimited\n");
		else
			p += sprintf(p, "%d\n",
		     		vm_info_2.pal_vm_info_2_s.max_purges ?
				vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		p += sprintf(p, "Supported memory attributes    : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		p += sprintf(p, "\n");
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) != 0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		p += sprintf(p,
			     "\nTLB walker                     : %simplemented\n"
			     "Number of DTR                  : %d\n"
			     "Number of ITR                  : %d\n"
			     "TLB insertable page sizes      : ",
			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

		p = bitvector_process(p, tr_pages);

		p += sprintf(p, "\nTLB purgeable page sizes       : ");

		p = bitvector_process(p, vw_pages);
	}
	if ((status = ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		p += sprintf(p,
		     "\nPurge base address             : 0x%016lx\n"
		     "Purge outer loop count         : %d\n"
		     "Purge inner loop count         : %d\n"
		     "Purge outer loop stride        : %d\n"
		     "Purge inner loop stride        : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1],
		     ptce.stride[0], ptce.stride[1]);

		p += sprintf(p,
		     "TC Levels                      : %d\n"
		     "Unique TC(s)                   : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for (i = 0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j = 2; j > 0; j--) {
				tc_pages = 0; /* just in case */

				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
					continue;
				}

				p += sprintf(p,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets           : %d\n"
				     "\tAssociativity       : %d\n"
				     "\tNumber of entries   : %d\n"
				     "\tFlags               : ",
				     cache_types[j+tc_info.tc_unified], i+1,
				     tc_info.tc_num_sets,
				     tc_info.tc_associativity,
				     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					p += sprintf(p, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					p += sprintf(p, "Unified ");
				if (tc_info.tc_reduce_tr)
					p += sprintf(p, "TCReduction");

				p += sprintf(p, "\n\tSupported page sizes: ");

				p = bitvector_process(p, tc_pages);

				/* when unified, the data entry (j == 2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}
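
Both tr_info() and vm_info() use the same /proc-handler accumulation idiom: p walks forward through a caller-supplied page buffer, each sprintf() returns the number of bytes it appended, and the function returns the total length p - page. Here is a minimal sketch of the idiom outside the kernel; fill_info() and its fields are illustrative, not kernel API.

#include <stdio.h>

static int fill_info(char *page)
{
	char *p = page;

	/* each sprintf() returns the bytes appended, advancing p */
	p += sprintf(p, "Field A                        : %d\n", 42);
	p += sprintf(p, "Field B                        : %s\n", "example");

	return p - page;	/* total bytes written, as tr_info()/vm_info() return */
}

int main(void)
{
	char page[4096];
	int n = fill_info(page);

	printf("%.*s(%d bytes)\n", n, page, n);
	return 0;
}
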
Example #5
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for multi-threading/multi-core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
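
The two masks at the end of identify_cpu() can be sanity-checked by plugging in the Itanium defaults the function starts from (impl_va_msb = 50, phys_addr_size = 44): a valid address must have zeros everywhere outside the implemented bits, excepting the region bits 63:61 of a virtual address and bit 63 of a physical address. The sketch below just evaluates the expressions, written with unsigned constants to sidestep signed-overflow corner cases.

#include <stdio.h>

int main(void)
{
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */

	/* bits a canonical virtual address may not use */
	unsigned long unimpl_va_mask =
		~((7UL << 61) | ((1UL << (impl_va_msb + 1)) - 1));
	/* bits a physical address may not use */
	unsigned long unimpl_pa_mask =
		~((1UL << 63) | ((1UL << phys_addr_size) - 1));

	/* prints 0x1ff8000000000000 and 0x7ffff00000000000 */
	printf("unimpl_va_mask = 0x%016lx\n", unimpl_va_mask);
	printf("unimpl_pa_mask = 0x%016lx\n", unimpl_pa_mask);
	return 0;
}
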