Example #1
/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	Dprintk("Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	Dprintk("Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers.  If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	Dprintk("Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	Dprintk("Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes are anymore.
	 */
	reg0 = apic_read(APIC_LVT0);
	Dprintk("Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	Dprintk("Getting LVT1: %x\n", reg1);

	return 1;
}
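All of these snippets use a Dprintk debug macro rather than a plain printk. The exact definition differs from file to file, but as a minimal sketch (assuming a compile-time DEBUG switch; the actual macro spelling and the gating symbol vary in the real sources) it usually amounts to:

/*
 * Hypothetical Dprintk definition, for illustration only: when the
 * debug switch is off, the calls compile to nothing, so they can be
 * left in hot boot and SMP bring-up paths at no cost.
 */
#ifdef DEBUG
#define Dprintk(x...)	printk(x)
#else
#define Dprintk(x...)	do { } while (0)
#endif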
Example #2
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
Example #3
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;

	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;

	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}
Example #4
static __init void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage(); 
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
Example #5
static __init void *spp_getpage(void)
{ 
	void *ptr;
	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC); 
	else if (start_pfn < table_end) {
		ptr = __va(start_pfn << PAGE_SHIFT);
		start_pfn++;
		memset(ptr, 0, PAGE_SIZE);
	} else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
} 
Example #6
static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;
	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);

	if (strncmp(str, "ISA", 3) == 0) {
		set_bit(m->mpc_busid, mp_bus_not_pci);
	} else if (strncmp(str, "PCI", 3) == 0) {
		clear_bit(m->mpc_busid, mp_bus_not_pci);
		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
		mp_current_pci_id++;
	} else {
		printk(KERN_ERR "Unknown bustype %s\n", str);
	}
}
Example #7
static void __init MP_bus_info(struct mpc_config_bus *m)
{
    char str[7];

    memcpy(str, m->mpc_bustype, 6);
    str[6] = 0;

#ifdef CONFIG_X86_NUMAQ
    mpc_oem_bus_info(m, str, translation_table[mpc_record]);
#else
    Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
#endif

#if MAX_MP_BUSSES < 256
    if (m->mpc_busid >= MAX_MP_BUSSES) {
        printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
               " is too large, max. supported is %d\n",
               m->mpc_busid, str, MAX_MP_BUSSES - 1);
        return;
    }
#endif

    if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
         set_bit(m->mpc_busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
#endif
    } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) {
#ifdef CONFIG_X86_NUMAQ
        mpc_oem_pci_bus(m, translation_table[mpc_record]);
#endif
        clear_bit(m->mpc_busid, mp_bus_not_pci);
        mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
        mp_current_pci_id++;
#if defined(CONFIG_EISA) || defined (CONFIG_MCA)
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
    } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
    } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA) - 1) == 0) {
        mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
#endif
    } else
        printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
}
Example #8
void
h8_send_next_cmd_byte(void)
{
        h8_cmd_q_t      *qp = list_entry(h8_actq.next, h8_cmd_q_t, link);
        int cnt;

        cnt = qp->cnt;
        qp->cnt++;

        if (h8_debug & 0x1)
                Dprintk("h8 sending next cmd byte 0x%x (0x%x)\n",
			cnt, qp->cmdbuf[cnt]);

        if (cnt) {
                WRITE_DATA(qp->cmdbuf[cnt]);
        } else {
                WRITE_CMD(qp->cmdbuf[cnt]);
        }
        return;
}
Example #9
/**
 * efi_partition(struct parsed_partitions *state, struct block_device *bdev)
 * @state
 * @bdev
 *
 * Description: called from check.c, if the disk contains GPT
 * partitions, sets up partition entries in the kernel.
 *
 * If the first block on the disk is a legacy MBR,
 * it will get handled by msdos_partition().
 * If it's a Protective MBR, we'll handle it here.
 *
 * We do not create a Linux partition for GPT, but
 * only for the actual data partitions.
 * Returns:
 * -1 if unable to read the partition table
 *  0 if this isn't our partition table
 *  1 if successful
 *
 */
int
efi_partition(struct parsed_partitions *state, struct block_device *bdev)
{
	gpt_header *gpt = NULL;
	gpt_entry *ptes = NULL;
	u32 i;
	unsigned ssz = bdev_hardsect_size(bdev) / 512;

	if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
		kfree(gpt);
		kfree(ptes);
		return 0;
	}

	Dprintk("GUID Partition Table is valid!  Yea!\n");

	for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
		u64 start = le64_to_cpu(ptes[i].starting_lba);
		u64 size = le64_to_cpu(ptes[i].ending_lba) -
			   le64_to_cpu(ptes[i].starting_lba) + 1ULL;

		if (!is_pte_valid(&ptes[i], last_lba(bdev)))
			continue;

		put_partition(state, i+1, start * ssz, size * ssz);

		/* If this is a RAID volume, tell md */
		if (!efi_guidcmp(ptes[i].partition_type_guid,
				 PARTITION_LINUX_RAID_GUID))
			state->parts[i+1].flags = 1;

		/* If this is a EFI System partition, tell hotplug */
		if (!efi_guidcmp(ptes[i].partition_type_guid,
				 PARTITION_SYSTEM_GUID))
			state->parts[i+1].is_efi_system_partition = 1;
	}
	kfree(ptes);
	kfree(gpt);
	printk("\n");
	return 1;
}
Example #10
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the 
   physical memory. To access them they are temporarily mapped. */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{ 
	unsigned long next; 

	Dprintk("init_memory_mapping\n");

	/* 
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are 
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys; 
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end) 
			next = end; 
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	} 

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();
}
Example #11
static void __pminit setup_p6_watchdog(void)
{
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_P6_PERFCTR0;

	clear_msr_range(MSR_P6_EVNTSEL0, 2);
	clear_msr_range(MSR_P6_PERFCTR0, 2);

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
	Dprintk("setting P6_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
	wrmsr(MSR_P6_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(MSR_P6_EVNTSEL0, evntsel, 0);
}
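For reference, the value programmed into the performance counter above is the negated number of unhalted CPU cycles between watchdog NMIs. As a worked example (assuming, purely for illustration, a 2 GHz CPU so cpu_khz is about 2000000, and nmi_hz set to 1): -(cpu_khz/nmi_hz*1000) = -2000000000, so the up-counting counter overflows and raises the watchdog NMI roughly once per second. The same arithmetic appears in the K7, P4 and IA32_PERFCTR0 variants below.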
Example #12
static void setup_k7_watchdog(void)
{
   unsigned int evntsel;

   nmi_perfctr_msr = MSR_K7_PERFCTR0;

   clear_msr_range(MSR_K7_EVNTSEL0, 4);
   clear_msr_range(MSR_K7_PERFCTR0, 4);

   evntsel = K7_EVNTSEL_INT
      | K7_EVNTSEL_OS
      | K7_EVNTSEL_USR
      | K7_NMI_EVENT;

   wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
   Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
   wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
   apic_write(APIC_LVTPC, APIC_DM_NMI);
   evntsel |= K7_EVNTSEL_ENABLE;
   wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
Example #13
struct file * tux_open_file (char *filename, int mode)
{
	struct file *filp;

	if (!filename)
		TUX_BUG();

	/* Rule no. 3 -- Does the file exist ? */

	filp = filp_open(filename, mode, 0600);

	if (IS_ERR(filp) || !filp || !filp->f_dentry)
		goto err;

out:
	return filp;
err:
	Dprintk("filp_open() error: %d.\n", (int)filp);
	filp = NULL;
	goto out;
}
Example #14
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));

	Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
		map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
Example #15
static int setup_p4_watchdog(void)
{
	unsigned int misc_enable, dummy;

	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
	nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
		clear_msr_range(0x3F1, 2);
	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
	   docs doesn't fully define it, so leave it alone for now. */
	if (boot_cpu_data.x86_model >= 0x3) {
		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
		clear_msr_range(0x3A0, 26);
		clear_msr_range(0x3BC, 3);
	} else {
		clear_msr_range(0x3A0, 31);
	}
	clear_msr_range(0x3C0, 6);
	clear_msr_range(0x3C8, 6);
	clear_msr_range(0x3E0, 2);
	clear_msr_range(MSR_P4_CCCR0, 18);
	clear_msr_range(MSR_P4_PERFCTR0, 18);

	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
	wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
	return 1;
}
Example #16
void queue_cachemiss (tux_req_t *req)
{
	iothread_t *iot = req->ti->iot;

	Dprintk("queueing_cachemiss(req:%p) (req->cwd_dentry: %p) at %p:%p.\n",
		req, req->cwd_dentry, __builtin_return_address(0), __builtin_return_address(1));
	if (req->idle_input || req->wait_output_space)
		TUX_BUG();
	req->had_cachemiss = 1;
	if (!list_empty(&req->work))
		TUX_BUG();
	spin_lock(&iot->async_lock);
	if (connection_too_fast(req))
		list_add_tail(&req->work, &iot->async_queue);
	else
		list_add(&req->work, &iot->async_queue);
	iot->nr_async_pending++;
	INC_STAT(nr_cachemiss_pending);
	spin_unlock(&iot->async_lock);

	wake_up(&iot->async_sleep);
}
Example #17
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __devinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef XEN
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
#endif
	cpu_init();
	smp_callin();

#ifdef XEN
	if (vmx_enabled)
		vmx_init_env(0, 0);

	startup_cpu_idle_loop();
#else
	cpu_idle();
#endif
	return 0;
}
Example #18
static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;
	Dprintk("Bus #%d is %s\n", m->mpc_busid, str);

	if (strncmp(str, "ISA", 3) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
	} else if (strncmp(str, "EISA", 4) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
	} else if (strncmp(str, "PCI", 3) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
		mp_current_pci_id++;
	} else if (strncmp(str, "MCA", 3) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
	} else {
		printk(KERN_ERR "Unknown bustype %s\n", str);
	}
}
Example #19
static __init void set_pte_phys(unsigned long vaddr,
			 unsigned long phys, pgprot_t prot, int user_mode)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage(); 
		make_page_readonly(pmd, XENFEAT_writable_page_tables);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		make_page_readonly(pte, XENFEAT_writable_page_tables);
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	if (pgprot_val(prot))
		new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
	else
Example #20
static tux_req_t * get_cachemiss (iothread_t *iot)
{
	struct list_head *tmp;
	tux_req_t *req = NULL;

	spin_lock(&iot->async_lock);
	if (!list_empty(&iot->async_queue)) {

		tmp = iot->async_queue.next;
		req = list_entry(tmp, tux_req_t, work);

		Dprintk("get_cachemiss(%p): got req %p.\n", iot, req);
		list_del(tmp);
		DEBUG_DEL_LIST(tmp);
		iot->nr_async_pending--;
		DEC_STAT(nr_cachemiss_pending);

		if (req->ti->iot != iot)
			TUX_BUG();
	}
	spin_unlock(&iot->async_lock);
	return req;
}
Example #21
int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}
	local_irq_enable();

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}
Example #22
/* 
 * Basic means by which commands are sent to the H8.
 */
void
h8_q_cmd(u_char *cmd, int cmd_size, int resp_size)
{
        h8_cmd_q_t      *qp;
	unsigned long flags;
        int             i;

        /* get cmd buf */
	save_flags(flags); cli();
        while (list_empty(&h8_freeq)) {
                Dprintk("H8: need to allocate more cmd buffers\n");
                restore_flags(flags);
                h8_alloc_queues();
                save_flags(flags); cli();
        }
        /* get first element from queue */
        qp = list_entry(h8_freeq.next, h8_cmd_q_t, link);
        list_del(&qp->link);

        restore_flags(flags);

        /* fill it in */
        for (i = 0; i < cmd_size; i++)
            qp->cmdbuf[i] = cmd[i];
        qp->ncmd = cmd_size;
        qp->nrsp = resp_size;

        /* queue it at the end of the cmd queue */
        save_flags(flags); cli();

        /* XXX this actually puts it at the start of cmd queue, bug? */
        list_add(&qp->link, &h8_cmdq);

        restore_flags(flags);

        h8_start_new_cmd();
}
Example #23
static void __init MP_processor_info (struct mpc_config_processor *m)
{
	int ver;

	if (!(m->mpc_cpuflag & CPU_ENABLED))
		return;

	printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
		m->mpc_apicid,
	       (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
	       (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
		m->mpc_apicver);

	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk("    Bootup CPU\n");
		boot_cpu_id = m->mpc_apicid;
	}
	num_processors++;

	if (m->mpc_apicid > MAX_APICS) {
		printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
			m->mpc_apicid, MAX_APICS);
		return;
	}
	ver = m->mpc_apicver;

	physid_set(m->mpc_apicid, phys_cpu_present_map);
	/*
	 * Validate version
	 */
	if (ver == 0x0) {
		printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
		ver = 0x10;
	}
	apic_version[m->mpc_apicid] = ver;
}
Example #24
static void __pminit setup_p6_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_IA32_PERFCTR0;

	for(i = 0; i < 2; ++i) {
		wrmsr(MSR_IA32_EVNTSEL0+i, 0, 0);
		wrmsr(MSR_IA32_PERFCTR0+i, 0, 0);
	}

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	wrmsr(MSR_IA32_EVNTSEL0, evntsel, 0);
	Dprintk("setting IA32_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
	wrmsr(MSR_IA32_PERFCTR0, -(cpu_khz/nmi_hz*1000), 0);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(MSR_IA32_EVNTSEL0, evntsel, 0);
}
Example #25
/*==========================================================================*
 * Name:         do_boot_cpu
 *
 * Description:  This routine boot up one AP.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    phys_id - Target CPU physical ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
static void __init do_boot_cpu(int phys_id)
{
	struct task_struct *idle;
	unsigned long send_status, boot_status;
	int timeout, cpu_id;

	cpu_id = ++cpucount;

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("failed fork for CPU#%d.", cpu_id);

	idle->thread.lr = (unsigned long)start_secondary;

	map_cpu_to_physid(cpu_id, phys_id);

	/* So we see what's up   */
	printk("Booting processor %d/%d\n", phys_id, cpu_id);
	stack_start.spi = (void *)idle->thread.sp;
	task_thread_info(idle)->cpu = cpu_id;

	/*
	 * Send Startup IPI
	 *   1.IPI received by CPU#(phys_id).
	 *   2.CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
	 *   3.CPU#(phys_id) enter start_secondary()
	 */
	send_status = 0;
	boot_status = 0;

	cpu_set(phys_id, cpu_bootout_map);

	/* Send Startup IPI */
	send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;

	/* Wait 100[ms] */
	do {
		Dprintk("+");
		udelay(1000);
		send_status = !cpu_isset(phys_id, cpu_bootin_map);
	} while (send_status && (timeout++ < 100));

	Dprintk("After Startup.\n");

	if (!send_status) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu_id);
		cpu_set(cpu_id, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu_id);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 5000; timeout++) {
			if (cpu_isset(cpu_id, cpu_callin_map))
				break;	/* It has booted */
			udelay(1000);
		}

		if (cpu_isset(cpu_id, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
		} else {
			boot_status = 1;
			printk("Not responding.\n");
		}
	} else
		printk("IPI never delivered???\n");

	if (send_status || boot_status) {
		unmap_cpu_to_physid(cpu_id, phys_id);
		cpu_clear(cpu_id, cpu_callout_map);
		cpu_clear(cpu_id, cpu_callin_map);
		cpu_clear(cpu_id, cpu_initialized);
		cpucount--;
	}
}
Example #26
/*==========================================================================*
 * Name:         smp_prepare_cpus (old smp_boot_cpus)
 *
 * Description:  This routine boot up APs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phys_id;
	unsigned long nr_cpu;

	nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL);
	if (nr_cpu > NR_CPUS) {
		printk(KERN_INFO "NUM_OF_CPUS reg. value [%ld] > NR_CPU [%d]",
			nr_cpu, NR_CPUS);
		goto smp_done;
	}
	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
		physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif

	show_mp_info(nr_cpu);

	init_ipi_lock();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated by commandline.\n");
		goto smp_done;
	}

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));

	for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (phys_id == bsp_phys_id)
			continue;

		if (!physid_isset(phys_id, phys_cpu_present_map))
			continue;

		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
			continue;

		do_boot_cpu(phys_id);

		/*
		 * Make sure we unmap all failed CPUs
		 */
		if (physid_to_cpu(phys_id) == -1) {
			physid_clear(phys_id, phys_cpu_present_map);
			printk("phys CPU#%d not responding - " \
				"cannot use it.\n", phys_id);
		}
	}

smp_done:
	Dprintk("Boot done.\n");
}
Example #27
static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
		 * local_bh_enable(), which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
Example #28
static asmlinkage void netpoll_netdump(struct pt_regs *regs, void *platform_arg)
{
	reply_t reply;
	char *tmp = command_tmp;
	extern unsigned long totalram_pages;
	struct pt_regs myregs;
	req_t *req;

	/*
	 * Just in case we are crashing within the networking code
	 * ... attempt to fix up.
	 */
	netpoll_reset_locks(&np);
	platform_fix_regs();
	platform_timestamp(t0);
	netpoll_set_trap(1); /* bypass networking stack */

	local_irq_disable();
	local_bh_disable();
	printk("< netdump activated - performing handshake with the server. >\n");
	netdump_startup_handshake(&np);

	printk("< handshake completed - listening for dump requests. >\n");

	while (netdump_mode) {
		Dprintk("main netdump loop: polling controller ...\n");
		netpoll_poll(&np);

		req = get_new_req();
		if (!req)
			continue;

		Dprintk("got new req, command %d.\n", req->command);
		print_status(req);
		switch (req->command) {
		case COMM_NONE:
			Dprintk("got NO command.\n");
			break;

		case COMM_SEND_MEM:
			Dprintk("got MEM command.\n");
			send_netdump_mem(&np, req);
			break;

		case COMM_EXIT:
			Dprintk("got EXIT command.\n");
			netdump_mode = 0;
			netpoll_set_trap(0);
			break;

		case COMM_REBOOT:
			Dprintk("got REBOOT command.\n");
			printk("netdump: rebooting in 3 seconds.\n");
			netdump_mdelay(3000);
			machine_restart(NULL);
			break;

		case COMM_HELLO:
			sprintf(tmp, "Hello, this is netdump version 0.%02d\n",
				NETDUMP_VERSION);
			reply.code = REPLY_HELLO;
			reply.nr = req->nr;
			reply.info = NETDUMP_VERSION;
			send_netdump_msg(&np, tmp, strlen(tmp), &reply);
			break;

		case COMM_GET_PAGE_SIZE:
			sprintf(tmp, "PAGE_SIZE: %ld\n", PAGE_SIZE);
			reply.code = REPLY_PAGE_SIZE;
			reply.nr = req->nr;
			reply.info = PAGE_SIZE;
			send_netdump_msg(&np, tmp, strlen(tmp), &reply);
			break;

		case COMM_GET_REGS:
			reply.code = REPLY_REGS;
			reply.nr = req->nr;
			reply.info = (u32)totalram_pages;
        		send_netdump_msg(&np, tmp,
				platform_get_regs(tmp, &myregs), &reply);
			break;

		case COMM_GET_NR_PAGES:
			reply.code = REPLY_NR_PAGES;
			reply.nr = req->nr;
			reply.info = platform_max_pfn();
			sprintf(tmp, 
				"Number of pages: %ld\n", platform_max_pfn());
			send_netdump_msg(&np, tmp, strlen(tmp), &reply);
			break;

		case COMM_SHOW_STATE:
			/* send response first */
			reply.code = REPLY_SHOW_STATE;
			reply.nr = req->nr;
			reply.info = 0;

			netdump_mode = 0;
			if (regs)
				show_regs(regs);
			show_state();
			show_mem();
			netdump_mode = 1;

			send_netdump_msg(&np, tmp, strlen(tmp), &reply);

			break;

		default:
			reply.code = REPLY_ERROR;
			reply.nr = req->nr;
			reply.info = req->command;
			Dprintk("got UNKNOWN command!\n");
			sprintf(tmp, "Got unknown command code %d!\n", 
				req->command);
			send_netdump_msg(&np, tmp, strlen(tmp), &reply);
			break;
		}
		kfree(req);
		req = NULL;
	}
	sprintf(tmp, "NETDUMP end.\n");
	reply.code = REPLY_END_NETDUMP;
	reply.nr = 0;
	reply.info = 0;
	send_netdump_msg(&np, tmp, strlen(tmp), &reply);
	printk("NETDUMP END!\n");
}
static int __devinit
do_boot_cpu (int sapicid, int cpu)
{
    int timeout;
    struct create_idle c_idle = {
        .cpu	= cpu,
        .done	= COMPLETION_INITIALIZER(c_idle.done),
    };
    DECLARE_WORK(work, do_fork_idle, &c_idle);

    c_idle.idle = get_idle_for_cpu(cpu);
    if (c_idle.idle) {
        init_idle(c_idle.idle, cpu);
        goto do_rest;
    }

    /*
     * We can't use kernel_thread since we must avoid to reschedule the child.
     */
    if (!keventd_up() || current_is_keventd())
        work.func(work.data);
    else {
        schedule_work(&work);
        wait_for_completion(&c_idle.done);
    }

    if (IS_ERR(c_idle.idle))
        panic("failed fork for CPU %d", cpu);

    set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
    task_for_booting_cpu = c_idle.idle;

    Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

    set_brendez_area(cpu);
    platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

    /*
     * Wait 10s total for the AP to start
     */
    Dprintk("Waiting on callin_map ...");
    for (timeout = 0; timeout < 100000; timeout++) {
        if (cpu_isset(cpu, cpu_callin_map))
            break;  /* It has booted */
        udelay(100);
    }
    Dprintk("\n");

    if (!cpu_isset(cpu, cpu_callin_map)) {
        printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
        ia64_cpu_to_sapicid[cpu] = -1;
        cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
        return -EINVAL;
    }
    return 0;
}

static int __init
decay (char *str)
{
    int ticks;
    get_option (&str, &ticks);
    return 1;
}
static void __devinit
smp_callin (void)
{
    int cpuid, phys_id;
    extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
    extern void pfm_init_percpu(void);
#endif

    cpuid = smp_processor_id();
    phys_id = hard_smp_processor_id();

    if (cpu_online(cpuid)) {
        printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
               phys_id, cpuid);
        BUG();
    }

    lock_ipi_calllock();
    cpu_set(cpuid, cpu_online_map);
    unlock_ipi_calllock();
    per_cpu(cpu_state, cpuid) = CPU_ONLINE;

    smp_setup_percpu_timer();

    ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
    pfm_init_percpu();
#endif

    local_irq_enable();

    if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
        /*
         * Synchronize the ITC with the BP.  Need to do this after irqs are
         * enabled because ia64_sync_itc() calls smp_call_function_single(), which
         * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
         * local_bh_enable(), which bugs out if irqs are not enabled...
         */
        Dprintk("Going to syncup ITC with BP.\n");
        ia64_sync_itc(0);
    }

    /*
     * Get our bogomips.
     */
    ia64_init_itm();
    calibrate_delay();
    local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
    ia32_gdt_init();
#endif

    /*
     * Allow the master to continue.
     */
    cpu_set(cpuid, cpu_callin_map);
    Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}