Example #1
void do_xcpu_delete_all_threads(cpu_mailbox_t * mailbox)
{
    space_t* space = (space_t*)mailbox->param[0];

    xcpu_unwind_mailbox = mailbox;

    delete_all_threads(space);

    xcpu_unwind_mailbox = NULL;

    /* ensure that our pagetable does not disappear */
    if (get_current_pagetable() == space->pagedir_phys(get_cpu_id()))
    {
	/* after deleting all threads we must switch the pagetable */
	set_current_pagetable(get_kernel_space()->pagedir_phys(get_cpu_id()));

	if (get_current_tcb() != get_idle_tcb())
	{
	    mailbox->switch_to_idle(get_current_tcb(), MAILBOX_OK);
	    /* not re-activated */
	    panic("mailbox->switch_to_idle reactivated");
	}

    }
    mailbox->set_status(MAILBOX_OK);
}
Example #2
void do_xcpu_thread_ex_regs(cpu_mailbox_t * mailbox)
{
    tcb_t * tcb = mailbox->tcb;
    if (tcb->cpu != get_cpu_id())
    {
	IPI_PRINTF("xcpu ex regs on wrong cpu (%x)\n", tcb);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }

    IPI_PRINTF("%s %x, state=%x\n", __FUNCTION__, tcb, tcb->thread_state);
    
    xcpu_unwind_mailbox = mailbox;
    IPI_PRINTF("before unwind_ipc (%x)\n", get_cpu_id(), tcb);
	
    /* unwind ipc expects to have the spinlock! */
    spin_lock(&tcb->tcb_spinlock);
    unwind_ipc(tcb);
    IPI_PRINTF("after unwind_ipc (%x)\n", get_cpu_id(), tcb);

    /* param[0] = IP, param[1] = SP; ~0U means "read the old value, don't modify" */
    if (mailbox->param[1] == ~0U)
	mailbox->param[1] = get_user_sp(tcb);
    else
	mailbox->param[1] = set_user_sp(tcb, mailbox->param[1]);

    if (mailbox->param[0] == ~0U)
	mailbox->param[0] = get_user_ip(tcb);
    else
	mailbox->param[0] = set_user_ip(tcb, mailbox->param[0]);

    if (mailbox->tid == L4_INVALID_ID)
	mailbox->tid = tcb->pager;
    else
    {
	l4_threadid_t oldpager;
	oldpager = tcb->pager;
	tcb->pager = mailbox->tid;
	mailbox->tid = oldpager;
    }

    /* 
     * we never allocate xcpu-threads 
     * therefore we can always set them running
     */
    thread_dequeue_wakeup(tcb);
    tcb->thread_state = TS_RUNNING;
    thread_enqueue_ready(tcb);
    spin_unlock(&tcb->tcb_spinlock);
    
    xcpu_unwind_mailbox = NULL;
    mailbox->set_status(MAILBOX_OK);
}
Example #3
void do_xcpu_thread_put(cpu_mailbox_t * mailbox)
{
    tcb_t * tcb = mailbox->tcb;
    IPI_PRINTF("%s %p, state=%x\n", __FUNCTION__, tcb, tcb->thread_state);

    tcb->cpu = get_cpu_id();
    thread_adapt_queue_state(tcb, mailbox->param[0]);
    mailbox->set_status(MAILBOX_OK);
	
    /* adjust the page directory for this tcb */
    IPI_PRINTF("pgdir for tcb=%x: %x\n", tcb, tcb->pagedir_cache);
    thread_adapt_pagetable(tcb, get_cpu_id());
    IPI_PRINTF("pgdir for tcb=%x: %x\n", tcb, tcb->pagedir_cache);
    //while(1);
}
Example #4
void do_xcpu_thread_get(cpu_mailbox_t * mailbox)
{
    tcb_t* tcb = mailbox->tcb;
    IPI_PRINTF("%s %p, state=%x\n", __FUNCTION__, tcb, tcb->thread_state);

    /* not on our cpu! */
    if (tcb->cpu != get_cpu_id())
    {
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }

    mailbox->param[0] = tcb->queue_state;
    thread_dequeue_present(tcb);
    thread_dequeue_ready(tcb);
    thread_dequeue_wakeup(tcb);

    /* are we ourselves the migrated thread? */
    if (tcb == get_current_tcb())
    {
	apic_ack_irq();
	mailbox->switch_to_idle(tcb /* which is current */, MAILBOX_OK);
	return;
    };
	
    mailbox->set_status(MAILBOX_OK);
}
Example #5
int smp_delete_all_threads(space_t * space)
{
    //IPI_PRINTF("%s (%x)\n", __FUNCTION__, victim);
    cpu_mailbox_t * mailbox = get_mailbox();
    for (dword_t cpu = 0; cpu < CONFIG_SMP_MAX_CPU; cpu++)
    {
	if (cpu == get_cpu_id())
	    continue;
	if (!is_cpu_online(cpu))
	    continue;

	mailbox->param[0] = (dword_t)space;
	dword_t status = mailbox->send_command(cpu, SMP_CMD_DELETE_ALL_THREADS);
	switch(status)
	{
	case MAILBOX_OK:
	    return 1;
	case MAILBOX_UNWIND_REMOTE:
	    /* we have to perform a remote unwind */
	    IPI_PRINTF("%s: remote unwind %x\n", mailbox->tcb);
	    unwind_ipc(mailbox->tcb);
	    break;
	case MAILBOX_ERROR:
	    enter_kdebug("smp_delete_task: error deleting task");
	    break;
	default:
	    enter_kdebug("smp_delete_task: unexpected return value");
	    break;
	}
    }
    return 0;
}
Example #6
void do_xcpu_ipc_receive(cpu_mailbox_t * mailbox)
{
    tcb_t * from_tcb = mailbox->tcb;
    tcb_t * to_tcb = (tcb_t*)mailbox->param[0];
    XIPC_PRINTF("ipi: %s from: %p, to: %p\n", __FUNCTION__, from_tcb, to_tcb);
    if (from_tcb->cpu != get_cpu_id())
    {
	IPI_PRINTF("from_tcb->cpu != current_cpu\n");
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }

    spin_lock(&to_tcb->tcb_spinlock);

    if (!IS_POLLING(from_tcb) || 
	(from_tcb->partner != to_tcb->myself))
    {
	IPI_PRINTF("not receiving from partner anymore\n");
	mailbox->set_status(MAILBOX_ERROR);
	spin_unlock(&to_tcb->tcb_spinlock);
	return;
    }

    /* ok - we are partners and want to do ipc with each other */
    thread_dequeue_send(to_tcb, from_tcb);
    thread_dequeue_wakeup(from_tcb);
    from_tcb->thread_state = TS_XCPU_LOCKED_RUNNING;
    thread_enqueue_ready(from_tcb);
    spin_unlock(&to_tcb->tcb_spinlock);

    /* release other cpu */
    mailbox->set_status(MAILBOX_OK);
}
Example #7
void do_xcpu_ipc_start(cpu_mailbox_t * mailbox)
{
    tcb_t * to_tcb = mailbox->tcb;
    tcb_t * from_tcb = (tcb_t*)mailbox->param[0];
    IPI_PRINTF("ipi: %s from: %p, to: %p\n", __FUNCTION__, from_tcb, to_tcb);

    if (to_tcb->cpu != get_cpu_id())
    {
	IPI_PRINTF("%s to_tcb->cpu != current_cpu\n", __FUNCTION__);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }
	
    spin_lock(&to_tcb->tcb_spinlock);

    /* check if destination still receives from me */
    if (!( ( to_tcb->thread_state == TS_WAITING ) && 
	   ( to_tcb->partner == L4_NIL_ID || 
	     to_tcb->partner == from_tcb->myself ) ))
    {
	XIPC_PRINTF("destination not waiting (state: %x, partner: %x)\n", 
		    to_tcb->thread_state, to_tcb->partner);
	spin_unlock(&to_tcb->tcb_spinlock);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }
    
    /* set into receiving state */
    to_tcb->thread_state = TS_LOCKED_WAITING;
    spin_unlock(&to_tcb->tcb_spinlock);

    /* release partner CPU */
    mailbox->set_status(MAILBOX_OK);
}    
Example #8
void do_xcpu_ipc_end(cpu_mailbox_t * mailbox)
{
    tcb_t * to_tcb = mailbox->tcb;
    tcb_t * from_tcb = (tcb_t*)mailbox->param[0];
    XIPC_PRINTF("ipi: %s from: %p, to: %p\n", __FUNCTION__, from_tcb, to_tcb);

    if (to_tcb->cpu != get_cpu_id())
    {
	IPI_PRINTF("%s: to_tcb->cpu != current_cpu\n", __FUNCTION__);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }
	
    spin_lock(&to_tcb->tcb_spinlock);
    
    /* check that everything is still all right! */
    if (to_tcb->thread_state != TS_LOCKED_WAITING ||
	to_tcb->partner != from_tcb->myself)
    {
	IPI_PRINTF("%s something whicked happened meanwhile (ex_regs?) (to->state=%x, to->partner=%x\n", __FUNCTION__, to_tcb->thread_state, to_tcb->partner);
	spin_unlock(&to_tcb->tcb_spinlock);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }

    /* ok, everything is fine, we have to explicitly enqueue the thread,
     * since we do not directly switch */
    to_tcb->thread_state = TS_RUNNING;
    thread_enqueue_ready(to_tcb);

    spin_unlock(&to_tcb->tcb_spinlock);
    mailbox->set_status(MAILBOX_OK);
}
Example #9
SE_HANDLE
vpn_start (void *data)
{
	struct vpn_msg_start *arg;
	struct msgbuf buf[1];
	SE_HANDLE ret;
	int i;

	spinlock_lock (&handle_lock);
	for (i = 0; i < NUM_OF_HANDLE; i++)
		if (handle[i] == NULL)
			goto found;
	panic ("vpn_start: handle full");
found:
	handle[i] = data;
	spinlock_unlock (&handle_lock);
	arg = mempool_allocmem (mp, sizeof *arg);
	arg->handle = i;
	arg->cpu = get_cpu_id ();
	setmsgbuf (&buf[0], arg, sizeof *arg, 1);
	callsub (VPN_MSG_START, buf, 1);
	ret = arg->retval;
	mempool_freemem (mp, arg);
	return ret;
}
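Note that vpn_start claims a slot in handle[] but nothing in this excerpt ever releases it. The release path is not shown in the listing; a minimal, purely hypothetical sketch of one (name and existence assumed, using only the handle_lock and handle[] seen above) could look like this:

/* Hypothetical helper, not part of the listing: frees the slot claimed
 * in vpn_start so the handle index can be reused. */
static void
vpn_handle_free (int i)
{
	spinlock_lock (&handle_lock);
	handle[i] = NULL;
	spinlock_unlock (&handle_lock);
}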
Example #10
static void
sendvirtualnicrecv_premap (SE_HANDLE nic_handle, UINT num_packets,
			   void **packets, UINT *packet_sizes, void *param,
			   long *premap)
{
	struct vpn_msg_virtualnicrecv *arg;
	struct msgbuf *buf;
	UINT i;

	arg = mempool_allocmem (mp, sizeof *arg);
	arg->nic_handle = nic_handle;
	arg->param = param;
	arg->num_packets = num_packets;
	arg->cpu = get_cpu_id ();
	buf = alloc (sizeof *buf * (1 + num_packets));
	setmsgbuf (&buf[0], arg, sizeof *arg, 0);
	if (premap) {
		for (i = 0; i < num_packets; i++)
			setmsgbuf_premap (&buf[1 + i], packets[i],
					  packet_sizes[i], 0, premap[i]);
	} else {
		for (i = 0; i < num_packets; i++)
			setmsgbuf (&buf[1 + i], packets[i], packet_sizes[i],
				   0);
	}
	callsub (VPN_MSG_VIRTUALNICRECV, buf, num_packets + 1);
	free (buf);
	mempool_freemem (mp, arg);
}
Example #11
/* pre-condition:
 *   tcb is waiting or receiving on another cpu
 */
dword_t smp_start_short_ipc(tcb_t * tcb, tcb_t * current)
{
    XIPC_PRINTF("sending start_short_ipc ipi (current=%p)\n", current);

    cpu_mailbox_t * mailbox = get_mailbox();    
    mailbox->tcb = tcb;
    mailbox->param[0] = (dword_t)current;

    dword_t status = mailbox->send_command(tcb->cpu, SMP_CMD_IPC_SHORT);

    /* 
     * ok - delivery can start now
     * partner cpu spins in mailbox loop and waits for message.
     */
    if (status == MAILBOX_OK)
	return 1;

    IPI_PRINTF("%d smp_start_short_ipc failed (%x (%d, %x) -> %x (%d, %x))\n", 
	       get_cpu_id(), 
	       current, current->cpu, current->thread_state,
	       tcb, tcb->cpu, tcb->thread_state);

    /* ipc failed - check whether we have pending requests */
    IPI_PRINTF("pending requests = %x\n", mailbox->pending_requests);
    mailbox->handle_pending_requests();

    return 0;
}
Example #12
void do_xcpu_unwind(cpu_mailbox_t * mailbox)
{
    tcb_t * tcb = mailbox->tcb;
    if (tcb->cpu != get_cpu_id())
    {
	IPI_PRINTF("xcpu unwind on wrong cpu (%x)\n", tcb);
	mailbox->set_status(MAILBOX_ERROR);
	return;
    }

    IPI_PRINTF("%s %x (partner=%x, current=%x)\n",
	       __FUNCTION__, tcb, tcb->partner, get_current_tcb());

    xcpu_unwind_mailbox = mailbox;
    
    unwind_ipc(tcb);

    xcpu_unwind_mailbox = NULL;

    /* now that guy is ready to run - enqueue into run queue */
    tcb->thread_state = TS_RUNNING;
    thread_enqueue_ready(tcb);

    /* may be ignored, if we have an unwind */
    mailbox->set_status(MAILBOX_OK);
}
Example #13
static int oprofile_hwsampler_init(struct oprofile_operations *ops)
{
	/*
	 * Initialize the timer mode infrastructure as well in order
	 * to be able to switch back dynamically.  oprofile_timer_init
	 * is not supposed to fail.
	 */
	if (oprofile_timer_init(ops))
		BUG();

	memcpy(&timer_ops, ops, sizeof(timer_ops));
	ops->create_files = oprofile_create_hwsampling_files;

	/*
	 * If the user space tools do not support newer cpu types,
	 * the force_cpu_type module parameter
	 * can be used to always return "timer" as cpu type.
	 */
	if (force_cpu_type != timer) {
		struct cpuid id;

		get_cpu_id (&id);

		switch (id.machine) {
		case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
		case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
		case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
		default: return -ENODEV;
		}
	}

	if (hwsampler_setup())
		return -ENODEV;

	/*
	 * Query the range for the sampling interval from the
	 * hardware.
	 */
	oprofile_min_interval = hwsampler_query_min_interval();
	if (oprofile_min_interval == 0)
		return -ENODEV;
	oprofile_max_interval = hwsampler_query_max_interval();
	if (oprofile_max_interval == 0)
		return -ENODEV;

	/* The initial value should be sane */
	if (oprofile_hw_interval < oprofile_min_interval)
		oprofile_hw_interval = oprofile_min_interval;
	if (oprofile_hw_interval > oprofile_max_interval)
		oprofile_hw_interval = oprofile_max_interval;

	printk(KERN_INFO "oprofile: System z hardware sampling "
	       "facility found.\n");

	ops->start = oprofile_hwsampler_start;
	ops->stop = oprofile_hwsampler_stop;

	return 0;
}
Example #14
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = &__get_cpu_var(cpu_id);

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #15
/*
 * cpu_init - initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
Example #16
static void export_kernel_boot_props(void)
{
    char tmp[PROP_VALUE_MAX];
    char cpuinfobuf[1024] = {0};
    int ret;
    unsigned i;
    struct {
        const char *src_prop;
        const char *dest_prop;
        const char *def_val;
    } prop_map[] = {
        { "ro.boot.serialno", "ro.serialno", "", },
        { "ro.boot.mode", "ro.bootmode", "unknown", },
        { "ro.boot.baseband", "ro.baseband", "unknown", },
        { "ro.boot.bootloader", "ro.bootloader", "unknown", },
    };

    get_cpu_id(cpuinfobuf, sizeof(cpuinfobuf));
    i = 0;
    property_set(prop_map[i].dest_prop, cpuinfobuf);
    for (i = 1; i < ARRAY_SIZE(prop_map); i++) {
        ret = property_get(prop_map[i].src_prop, tmp);
        if (ret > 0)
            property_set(prop_map[i].dest_prop, tmp);
        else
            property_set(prop_map[i].dest_prop, prop_map[i].def_val);
    }

    ret = property_get("ro.boot.console", tmp);
    if (ret)
        strlcpy(console, tmp, sizeof(console));

    /* save a copy for init's usage during boot */
    property_get("ro.bootmode", tmp);
    strlcpy(bootmode, tmp, sizeof(bootmode));

    /* if this was given on kernel command line, override what we read
     * before (e.g. from /proc/cpuinfo), if anything */
    ret = property_get("ro.boot.hardware", tmp);
    if (ret)
        strlcpy(hardware, tmp, sizeof(hardware));
    property_set("ro.hardware", hardware);

    snprintf(tmp, PROP_VALUE_MAX, "%d", revision);
    property_set("ro.revision", tmp);

    /* TODO: these are obsolete. We should delete them */
    if (!strcmp(bootmode,"factory"))
        property_set("ro.factorytest", "1");
    else if (!strcmp(bootmode,"factory2"))
        property_set("ro.factorytest", "2");
    else
        property_set("ro.factorytest", "0");
}
Example #17
/*
 * cpu_init - initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	struct cpuid *id = &__get_cpu_var(cpu_id);

	get_cpu_id(id);
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
	memset(idle, 0, sizeof(*idle));
}
Example #18
int 
numa_localize(tuple_t * relation, int64_t num_tuples, uint32_t nthreads) 
{
    uint32_t i, rv;
    uint64_t offset = 0;

    /* we need aligned allocation of items */
    create_arg_t args[nthreads];
    pthread_t tid[nthreads];
    cpu_set_t set;
    pthread_attr_t attr;

    unsigned int pagesize;
    unsigned int npages;
    unsigned int npages_perthr;
    uint64_t ntuples_perthr;
    uint64_t ntuples_lastthr;

    pagesize        = getpagesize();
    npages          = (num_tuples * sizeof(tuple_t)) / pagesize + 1;
    npages_perthr   = npages / nthreads;
    ntuples_perthr  = npages_perthr * (pagesize/sizeof(tuple_t));
    ntuples_lastthr = num_tuples - ntuples_perthr * (nthreads-1);

    pthread_attr_init(&attr);

    for( i = 0; i < nthreads; i++ ) {
        int cpu_idx = get_cpu_id(i);
        
        CPU_ZERO(&set);
        CPU_SET(cpu_idx, &set);
        pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &set);

        args[i].firstkey       = offset + 1;
        args[i].rel.tuples     = relation + offset;
        args[i].rel.num_tuples = (i == nthreads-1) ? ntuples_lastthr 
                                 : ntuples_perthr;
        offset += ntuples_perthr;

        rv = pthread_create(&tid[i], &attr, numa_localize_thread, 
                            (void*)&args[i]);
        if (rv){
            fprintf(stderr, "[ERROR] pthread_create() return code is %d\n", rv);
            exit(-1);
        }
    }

    for(i = 0; i < nthreads; i++){
        pthread_join(tid[i], NULL);
    }

    return 0;
}
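The worker passed to pthread_create() above, numa_localize_thread, is not part of this listing. A minimal sketch of what it could look like follows; it assumes tuple_t exposes a key member and relies on the first-touch NUMA policy: because each thread is pinned to a CPU before writing its slice of the relation, the touched pages end up on that CPU's local memory node.

/* Hypothetical worker (assumes tuple_t has a 'key' field); the real
 * implementation is not shown in the listing. */
static void *
numa_localize_thread(void * param)
{
    create_arg_t * arg = (create_arg_t *) param;
    int64_t i;

    /* First touch: writing each tuple from the pinned thread places the
       backing pages on this thread's local NUMA node. */
    for (i = 0; i < arg->rel.num_tuples; i++)
        arg->rel.tuples[i].key = arg->firstkey + i;

    return NULL;
}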
Example #19
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
	get_cpu_id(&S390_lowcore.cpu_id);

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
}
Example #20
void spin_lock_irqsave(spin_lock_t *lock, unsigned long *flags)
{
	enter_critical(flags);
#ifdef CONFIG_SMP
	do {
		if (!lock->value) {
			lock->value = 1;
			lock->cpu = get_cpu_id();
			break;
		}
	} while (1);
#endif
}
Example #21
void spin_lock(spin_lock_t *lock)
{
	disable_irqs();
#ifdef CONFIG_SMP
	do {
		if (!lock->value) {
			lock->value = 1;
			lock->cpu = get_cpu_id();
			break;
		}
	} while (1);
#endif
}
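Examples #20 and #21 show only the acquire side; the matching release functions are not in the listing. A minimal sketch of what they could look like, assuming enable_irqs() and exit_critical() mirror the disable_irqs()/enter_critical() calls above and that clearing lock->value is what releases the lock:

void spin_unlock(spin_lock_t *lock)
{
#ifdef CONFIG_SMP
	lock->value = 0;	/* release the lock word */
#endif
	enable_irqs();		/* assumed counterpart of disable_irqs() */
}

void spin_unlock_irqrestore(spin_lock_t *lock, unsigned long *flags)
{
#ifdef CONFIG_SMP
	lock->value = 0;
#endif
	exit_critical(flags);	/* assumed counterpart of enter_critical() */
}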
Example #22
static void
vpn_timer_callback (void *handle, void *data)
{
	struct vpn_msg_timer *arg;
	struct msgbuf buf[1];

	arg = mempool_allocmem (mp, sizeof *arg);
	arg->now = vpn_GetTickCount ();
	arg->cpu = get_cpu_id ();
	setmsgbuf (&buf[0], arg, sizeof *arg, 1);
	callsub (VPN_MSG_TIMER, buf, 1);
	mempool_freemem (mp, arg);
}
Example #23
void init_secondary_cpu(void)
{
	uint16_t pcpu_id;

	init_cpu_pre(INVALID_CPU_ID);

	pcpu_id = get_cpu_id();

	init_cpu_post(pcpu_id);

	init_debug_post(pcpu_id);

	enter_guest_mode(pcpu_id);
}
Example #24
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
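For context, the empty_zero_page and zero_page_mask values set up here are consumed by the architecture's ZERO_PAGE() macro, which picks one of the 1 << order zero pages based on the faulting address so that read-only zero mappings spread across cache colors. Roughly, along the lines of the s390 definition (shown here only as a sketch of the consumer side, not as part of this example):

/* Sketch of the consumer side; for context only. */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))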
Example #25
void smp_flush_tlb()
{
#warning inefficient implementation of tlb shootdown
    cpu_mailbox_t * mailbox = get_mailbox();
    for (dword_t cpu = 0; cpu < CONFIG_SMP_MAX_CPU; cpu++)
    {
	if (cpu == get_cpu_id())
	    continue;
	if (!is_cpu_online(cpu))
	    continue;

	dword_t status = mailbox->send_command(cpu, SMP_CMD_FLUSH_TLB);
	if (status != MAILBOX_OK)
	    enter_kdebug("smp_flush_tlb");
    }
}
Example #26
/* Create the trailer data at the end of a page. */
static void cf_diag_trailer(struct cf_trailer_entry *te)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct cpuid cpuid;

	te->cfvn = cpuhw->info.cfvn;		/* Counter version numbers */
	te->csvn = cpuhw->info.csvn;

	get_cpu_id(&cpuid);			/* Machine type */
	te->mach_type = cpuid.machine;
	te->cpu_speed = cf_diag_cpu_speed;
	if (te->cpu_speed)
		te->speed = 1;
	te->clock_base = 1;			/* Save clock base */
	memcpy(&te->tod_base, &tod_clock_base[1], 8);
	store_tod_clock((__u64 *)&te->timestamp);
}
Example #27
File: setup.c Project: 710leo/LVS
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
        /*
         * Store processor id in lowcore (used e.g. in timer_interrupt)
         */
	get_cpu_id(&S390_lowcore.cpu_id);

        /*
         * Force FPU initialization:
         */
        clear_thread_flag(TIF_USEDFPU);
        clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
}
Example #28
void get_system_information(void)
{
	cprintf("Getting system information: ");
	cprintf("CPU...");
	_cputype = get_cpu_type();
	_cpuvendor = get_cpu_id();
	cprintf("\b\b\b\b\b\bFPU...");
	_fputype = get_fpu_type();
	_fpuinfo = get_fpu_info();
	cprintf("\b\b\b\b\b\bSYS...");
	_systype = get_sys_type();
	_codecpl = get_cpl();
	_codeiopl = get_iopl();
	_extendertype = get_extender_type();
	_dpmiflags = get_dpmi_flags();
	if ((_dpmiflags & 0x02) == 0)
		_modetype = 1;
	else
		_modetype = 0;
	cprintf("\b\b\b\b\b\bMEM...");
	_totalmemsize = get_total_size();
	_lomemsize = get_lomem_size();
	_himemsize = get_himem_size();
	printf("\b\b\b\b\b\bDone.  \n");
}
Example #29
dword_t smp_end_ipc(tcb_t * to_tcb, tcb_t * current)
{
    XIPC_PRINTF("sending end_ipc ipi (to_tcb=%p)\n", to_tcb);

    cpu_mailbox_t * mailbox = get_mailbox();    
    mailbox->tcb = to_tcb;
    mailbox->param[0] = (dword_t)current;

    dword_t status = mailbox->send_command(to_tcb->cpu, SMP_CMD_IPC_END);

    if (status == MAILBOX_OK)
	return 1;

    IPI_PRINTF("smp_end_ipc failed (%x (%d, %x)\n",
	       get_cpu_id(), 
	       to_tcb, to_tcb->cpu, to_tcb->thread_state);

    return 0;
}
Example #30
static unsigned long setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	unsigned long size;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	default:
		order = 2;
		break;
	}

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}