Example #1
void
CPUCapabilities::_SetIntelCapabilities()
{
	cpuid_info baseInfo;
	cpuid_info cpuInfo;
	int32 maxStandardFunction, maxExtendedFunction = 0;

	if (get_cpuid(&baseInfo, 0L, 0L) != B_OK) {
		// this CPU doesn't support cpuid
		return;
	}

	maxStandardFunction = baseInfo.eax_0.max_eax;
	if (maxStandardFunction >= 500) {
		maxStandardFunction = 0; /* old Pentium sample chips have the cpu signature here */
	}
	
	/* Extended cpuid */

	get_cpuid(&cpuInfo, 0x80000000, 0L);

	// extended cpuid is only supported if max_eax is greater than the service id
	if (cpuInfo.eax_0.max_eax > 0x80000000) {
		maxExtendedFunction = cpuInfo.eax_0.max_eax & 0xff;
	}
	
	if (maxStandardFunction > 0) {

		get_cpuid(&cpuInfo, 1L, 0L);
		if (cpuInfo.eax_1.features & (1UL << 23)) {
			fCapabilities |= CAPABILITY_MMX;
		}

		if (cpuInfo.eax_1.features & (1UL << 25)) {
			fCapabilities |= CAPABILITY_SSE1;
		}

		if (cpuInfo.eax_1.features & (1UL << 26)) {
			fCapabilities |= CAPABILITY_SSE2;
		}

		if (maxStandardFunction >= 1) {
			/* Extended features (ECX of standard function 1) */
			if (cpuInfo.eax_1.extended_features & (1UL << 0)) {
				fCapabilities |= CAPABILITY_SSE3;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 9)) {
				fCapabilities |= CAPABILITY_SSSE3;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 19)) {
				fCapabilities |= CAPABILITY_SSE41;
			}
			if (cpuInfo.eax_1.extended_features & (1UL << 20)) {
				fCapabilities |= CAPABILITY_SSE42;
			}
		}
		}
	}
}
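
The CAPABILITY_* constants are not part of this listing; for the bitwise accumulation above to work they would have to be distinct bits. A minimal sketch, with names assumed from the code above:

// Hypothetical definitions, inferred from the usage above: each capability
// gets its own bit so fCapabilities can collect several of them with |=.
enum {
	CAPABILITY_MMX   = 1 << 0,
	CAPABILITY_SSE1  = 1 << 1,
	CAPABILITY_SSE2  = 1 << 2,
	CAPABILITY_SSE3  = 1 << 3,
	CAPABILITY_SSSE3 = 1 << 4,
	CAPABILITY_SSE41 = 1 << 5,
	CAPABILITY_SSE42 = 1 << 6,
};
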
Example #2
/*!	Detect SIMD flags for use in AppServer. Checks all CPUs in the system
	and chooses the minimum supported set of instructions.
*/
static void
detect_simd()
{
#if __INTEL__
	// Only scan CPUs for which we are certain the SIMD flags are properly
	// defined.
	const char* vendorNames[] = {
		"GenuineIntel",
		"AuthenticAMD",
		"CentaurHauls", // Via CPUs, MMX and SSE support
		"RiseRiseRise", // should be MMX-only
		"CyrixInstead", // MMX-only, but custom MMX extensions
		"GenuineTMx86", // MMX and SSE
		0
	};

	system_info systemInfo;
	if (get_system_info(&systemInfo) != B_OK)
		return;

	// We start out with all flags set and end up with only those flags
	// supported across all CPUs found.
	uint32 appServerSIMD = 0xffffffff;

	for (int32 cpu = 0; cpu < systemInfo.cpu_count; cpu++) {
		cpuid_info cpuInfo;
		get_cpuid(&cpuInfo, 0, cpu);

		// Get the vendor string and terminate it manually
		char vendor[13];
		memcpy(vendor, cpuInfo.eax_0.vendor_id, 12);
		vendor[12] = 0;

		bool vendorFound = false;
		for (uint32 i = 0; vendorNames[i] != 0; i++) {
			if (strcmp(vendor, vendorNames[i]) == 0)
				vendorFound = true;
		}

		uint32 cpuSIMD = 0;
		uint32 maxStdFunc = cpuInfo.regs.eax;
		if (vendorFound && maxStdFunc >= 1) {
			get_cpuid(&cpuInfo, 1, cpu);	// query the feature flags of this particular CPU
			uint32 edx = cpuInfo.regs.edx;
			if (edx & (1 << 23))
				cpuSIMD |= APPSERVER_SIMD_MMX;
			if (edx & (1 << 25))
				cpuSIMD |= APPSERVER_SIMD_SSE;
		} else {
			// no flags can be identified
			cpuSIMD = 0;
		}
		appServerSIMD &= cpuSIMD;
	}
	gAppServerSIMDFlags = appServerSIMD;
#endif	// __INTEL__
}
Example #3
int Has_3DNow()
{
	unsigned regs[4];
	get_cpuid(0x80000000, regs);
	if (regs[REG_EAX] < 0x80000000)
		return 0;
	get_cpuid(0x80000001, regs);
	return ((regs[REG_EDX] & (1U << 31)) > 0);	// bit 31 = 3DNow!; 1U avoids a signed shift into the sign bit
}
Example #4
void mem_load_test() {
  int size = (main_mem_size-MINADDR)/get_cpucnt(); 
  volatile _UNCACHED unsigned int *addr = TEST_START + get_cpuid()*size;
  for(unsigned int start_time = get_cpu_usecs(); get_cpu_usecs() - start_time < 2000 ;) {
    if (get_cpuid() == NOC_MASTER) {
      ABORT_IF_FAIL(mem_area_test_uncached(addr,size)<0,"FAIL");
    } else {
      mem_area_test_uncached(addr,size);
    }
  }
}
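
mem_area_test_uncached() is not shown in this listing. A minimal sketch of what such a helper could look like, assuming a negative return value signals a failure (which is what the ABORT_IF_FAIL check above tests for):

// Hypothetical helper matching the call above: write an index pattern into
// the uncached area, read it back, and return a negative value on the first
// mismatch (0 on success).
int mem_area_test_uncached(volatile _UNCACHED unsigned int *addr, int size) {
  int words = size / sizeof(unsigned int);
  for (int i = 0; i < words; i++)
    addr[i] = (unsigned int)i;
  for (int i = 0; i < words; i++) {
    if (addr[i] != (unsigned int)i)
      return -(i + 1);
  }
  return 0;
}
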
Example #5
static bool init_vx86(void) {
    int i;
    
    for (i=0; i<(int)(sizeof(VX86_pciDev)/sizeof(VX86_pciDev[0])); i++) VX86_pciDev[i] = NULL;

    // get North Bridge fun0 & South Bridge fun0 & IDE PCI-CFG
    if ((VX86_pciDev[VX86_NB]  = pci_Alloc(0x00, 0x00, 0x00)) == NULL) goto FAIL_INITVX86;
    if ((VX86_pciDev[VX86_SB]  = pci_Alloc(0x00, 0x07, 0x00)) == NULL) goto FAIL_INITVX86;
    if ((VX86_pciDev[VX86_IDE] = pci_Alloc(0x00, 0x0c, 0x00)) == NULL) goto FAIL_INITVX86;
    
    // now we are allowed to call get_cpuid()
    if ((VX86_cpuID = get_cpuid()) == CPU_UNSUPPORTED) goto FAIL_INITVX86;

    if (vx86_CpuID() != CPU_VORTEX86SX)
    {
        // North Bridge fun1 exists (note NB fun1 isn't a normal PCI device -- its vid & did are 0xffff)
        if ((VX86_pciDev[VX86_NB1]  = pci_Alloc(0x00, 0x00, 0x01)) == NULL) goto FAIL_INITVX86;
    }
    if (vx86_CpuID() == CPU_VORTEX86EX || vx86_CpuID() == CPU_VORTEX86DX2 || vx86_CpuID() == CPU_VORTEX86DX3)
    {
        // South Bridge fun1 exists (note SB fun1 isn't a normal PCI device -- its vid & did are 0xffff)
        if ((VX86_pciDev[VX86_SB1]  = pci_Alloc(0x00, 0x07, 0x01)) == NULL) goto FAIL_INITVX86;
    }
    return true;

FAIL_INITVX86:
    for (i=0; i<(int)(sizeof(VX86_pciDev)/sizeof(VX86_pciDev[0])); i++)
    {
        pci_Free(VX86_pciDev[i]);
        VX86_pciDev[i] = NULL;
    }
    err_print((char*)"%s: fail to setup system PCI devices!\n", __FUNCTION__);
    return false;
}
Example #6
void print_frame(struct frame *tf)
{
	STATIC_INIT_SPIN_LOCK(pflock);
	spin_lock(&pflock);
	printk("TRAP frame at %p from CPU %d\n", tf, get_cpuid());
	print_regs(&tf->tf_regs);
	printk("  es   0x----%04x\n", tf->tf_es);
	printk("  ds   0x----%04x\n", tf->tf_ds);
	printk("  trap 0x%08x %s\n", tf->tf_trapno, trapname(tf->tf_trapno));
	// If this trap was a page fault that just happened
	// (so %cr2 is meaningful), print the faulting linear address.
	if (tf->tf_trapno == T_PGFLT)
		printk("  cr2  0x%08x\n", rcr2());
	printk("  err  0x%08x", tf->tf_err);
	// For page faults, print decoded fault error code:
	// U/K=fault occurred in user/kernel mode
	// W/R=a write/read caused the fault
	// PR=a protection violation caused the fault (NP=page not present).
	if (tf->tf_trapno == T_PGFLT)
		printk(" [%s, %s, %s]\n",
			tf->tf_err & 4 ? "user" : "kernel",
			tf->tf_err & 2 ? "write" : "read",
			tf->tf_err & 1 ? "protection" : "not-present");
	else
		printk("\n");
	printk("  eip  0x%08x\n", tf->tf_eip);
	printk("  cs   0x----%04x\n", tf->tf_cs);
	printk("  flag 0x%08x\n", tf->tf_eflags);
	if ((tf->tf_cs & 3) != 0) {
		printk("  esp  0x%08x\n", tf->tf_esp);
		printk("  ss   0x----%04x\n", tf->tf_ss);
	}
	spin_unlock(&pflock);
}
Example #7
int main() {

  unsigned i;

  int id = get_cpuid();
  int cnt = get_cpucnt();

  for (i=0; i<MAX; ++i) data[i] = '#';

  for (i=1; i<cnt; ++i) {
    int core_id = i; // The core number
    int parameter = 1; // dummy
    corethread_create(core_id, &work, (void *) &parameter);  
  }

  data[id] = id+'0';

  for (i=0; i<MAX; ++i) UART_DATA = '.';


  // This is a "normal" multicore example where main is executed only
  // on core 0
  for (i=0; i<MAX; ++i) {
    while ((UART_STATUS & 0x01) == 0);
    UART_DATA = data[i];
  }

  for(;;);
}
Example #8
/*
 * Callback to the ptree walk function during add_cpus.
 * Receives a cpu di_node as part of the args and compares
 * each picl cpu node's cpuid to the device tree node's cpuid.
 * Sets the arg struct's result to 1 on a match.
 */
static int
cpu_exists(picl_nodehdl_t nodeh, void *c_args)
{
	di_node_t	di_node;
	cpu_lookup_t	*cpu_arg;
	int	err;
	int	dcpuid, pcpuid;
	int reg_prop[4];

	if (c_args == NULL)
		return (PICL_INVALIDARG);

	cpu_arg = c_args;
	di_node = cpu_arg->di_node;
	dcpuid = get_cpuid(di_node);

	err = ptree_get_propval_by_name(nodeh, OBP_REG, reg_prop,
	    sizeof (reg_prop));

	if (err != PICL_SUCCESS)
		return (PICL_WALK_CONTINUE);

	pcpuid = CFGHDL_TO_CPUID(reg_prop[0]);

	if (dcpuid == pcpuid) {
		cpu_arg->result = 1;
		return (PICL_WALK_TERMINATE);
	}

	cpu_arg->result = 0;
	return (PICL_WALK_CONTINUE);
}
Example #9
int __patmos_lock_acquire(_LOCK_T *lock) {
  const unsigned cnt = get_cpucnt();
  if (cnt > 1) {

    const unsigned char id = get_cpuid();
    _UNCACHED _LOCK_T *ll = (_UNCACHED _LOCK_T *)lock;

    ll->entering[id] = 1;
    unsigned n = 1 + max(ll);
    ll->number[id] = n;
    ll->entering[id] = 0;

    for (unsigned j = 0; j < cnt; j++) {
      while (ll->entering[j]) {
        /* busy wait */
      }
      unsigned m = ll->number[j];
      while ((m != 0) &&
             ((m < n) || ((m == n) && (j < id)))) {
        /* busy wait, only update m */
        m = ll->number[j];
      }
    }

    // invalidate data cache to establish cache coherence
    inval_dcache();
  }

  return 0;
}
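
The max() helper used above is not included in this listing; reading the code as Lamport's bakery algorithm, a minimal sketch would be:

// Sketch of the assumed max() helper: return the largest ticket number
// currently held by any core, so the caller can take the next one.
static unsigned max(_UNCACHED _LOCK_T *ll) {
  unsigned m = 0;
  for (unsigned i = 0; i < get_cpucnt(); i++) {
    if (ll->number[i] > m)
      m = ll->number[i];
  }
  return m;
}
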
Example #10
int main() {

  _iodev_ptr_t sspm = (_iodev_ptr_t) PATMOS_IO_OWNSPM;

  ok = 1;
  owner = 0; // start with myself

  for (int i=1; i<get_cpucnt(); ++i) {
    corethread_create(i, &work, NULL); 
  }
  // get first core working
  owner = 1;
  printf("Wait for finish\n");
  while(owner != 0)
    ;
  int id = get_cpuid();
  for (int i=0; i<4; ++i) {
    sspm[4*id + i] = id*0x100 + i;
  }
  int val;
  for (int i=0; i<4; ++i) {
    val = sspm[4*id + i];
    if (id*0x100 + i != val) ok = 0;
  }
  // check one core's write data
  if (sspm[4] != 0x100) ok = 0;

  if (ok) {
    printf("Test ok\n");
  } else {
    printf("Test failed\n");
  }

  return 0;
}
Example #11
/**
 * Initialize the timer_rescheduler interrupt
 */
STATIC void init_timer() {
  union apic_timer_lvt_fields_u lvtu = { .fields = get_apic_timer_lvt() };
  ac_u32 out_eax, out_ebx, out_ecx, out_edx;

  // Verify that TSC_DEADLINE is enabled
  //
  // See "Intel 64 and IA-32 Architectures Software Developer's Manual"
  // Volume 3 chapter 10 "Advanced Programmable Interrupt Controller (APIC)"
  // Section 10.5.4.1 "TSC-Deadline Mode"

  get_cpuid(1, &out_eax, &out_ebx, &out_ecx, &out_edx);
  if (AC_GET_BITS(ac_u32, out_ecx, 1, 24) != 1) {
    ac_printf("CPU does not support TSC-Deadline mode\n");
    reset_x86();
  }

  lvtu.fields.vector = TIMER_RESCHEDULE_ISR_INTR; // interrupt vector
  lvtu.fields.disable = AC_FALSE; // interrupt enabled
  lvtu.fields.mode = 2;     // TSC-Deadline
  set_apic_timer_lvt(lvtu.fields);

  slice_default = AcTime_nanos_to_ticks(SLICE_DEFAULT_NANOSECS);

  __atomic_store_n(&timer_reschedule_isr_counter, 0, __ATOMIC_RELEASE);
  set_apic_timer_tsc_deadline(ac_tscrd() + slice_default);

  ac_printf("init_timer:-slice_default_nanosecs=%ld slice_default=%ld\n",
      SLICE_DEFAULT_NANOSECS, slice_default);
}
Example #12
// The main function for the worker threads on the other cores
void work(void *arg) {
  int val = *((int *)arg);
  (void)val; // the dummy parameter is not used

  int id = get_cpuid();
  data[id] = id+'0';
}
Example #13
void cpu_info()
{
	char basic_info[15];
	get_cpuid(0, basic_info, basic_info + 4, basic_info + 8);
	basic_info[12] = '\n';
	basic_info[13] = '\0';
	disp_str(basic_info);
}
Example #14
void
AutoSystemInfo::Initialize()
{
    Assert(!initialized);
#ifndef _WIN32
    PAL_InitializeDLL();
    majorVersion = CHAKRA_CORE_MAJOR_VERSION;
    minorVersion = CHAKRA_CORE_MINOR_VERSION;
#endif

    processHandle = GetCurrentProcess();
    GetSystemInfo(this);

    // Make the page size constant so calculations are faster.
    Assert(this->dwPageSize == AutoSystemInfo::PageSize);
#if defined(_M_IX86) || defined(_M_X64)
    get_cpuid(CPUInfo, 1);
    isAtom = CheckForAtom();
#endif
#if defined(_M_ARM32_OR_ARM64)
    armDivAvailable = IsProcessorFeaturePresent(PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE) ? true : false;
#endif
    allocationGranularityPageCount = dwAllocationGranularity / dwPageSize;

    isWindows8OrGreater = IsWindows8OrGreater();

    binaryName[0] = _u('\0');

#if SYSINFO_IMAGE_BASE_AVAILABLE
    dllLoadAddress = (UINT_PTR)&__ImageBase;
    dllHighAddress = (UINT_PTR)&__ImageBase +
        ((PIMAGE_NT_HEADERS)(((char *)&__ImageBase) + __ImageBase.e_lfanew))->OptionalHeader.SizeOfImage;
#endif

    InitPhysicalProcessorCount();
#if DBG
    initialized = true;
#endif

    WCHAR DisableDebugScopeCaptureFlag[MAX_PATH];
    if (::GetEnvironmentVariable(_u("JS_DEBUG_SCOPE"), DisableDebugScopeCaptureFlag, _countof(DisableDebugScopeCaptureFlag)) != 0)
    {
        disableDebugScopeCapture = true;
    }
    else
    {
        disableDebugScopeCapture = false;
    }

    this->shouldQCMoreFrequently = false;
    this->supportsOnlyMultiThreadedCOM = false;
    this->isLowMemoryDevice = false;

    // 0 indicates we haven't retrieved the available commit. We get it lazily.
    this->availableCommit = 0;

    ChakraBinaryAutoSystemInfoInit(this);
}
Example #15
BOOL
AutoSystemInfo::LZCntAvailable() const
{
    Assert(initialized);
    int CPUInfo[4];
    get_cpuid(CPUInfo, 0x80000001);

    return VirtualSseAvailable(4) && (CPUInfo[2] & (1 << 5));
}
Example #16
int __patmos_lock_release(_LOCK_T *lock) {
  const unsigned cnt = get_cpucnt();
  if (cnt > 1) {
    const unsigned char id = get_cpuid();
    _UNCACHED _LOCK_T *ll = (_UNCACHED _LOCK_T *)lock;

    ll->number[id] = 0; // exit section
  }
  return 0;
}
Example #17
int Has_SSE4()
{
	unsigned regs[4];
	int flags[3];
	get_cpuid(1, regs);
	flags[0] = (regs[REG_ECX] & (1 << 19)) > 0;  // SSE 4.1
	flags[1] = (regs[REG_ECX] & (1 << 20)) > 0;  // SSE 4.2
	get_cpuid(0x80000001, regs);                 // SSE 4A is reported in the extended leaf (AMD)
	flags[2] = (regs[REG_ECX] & (1 << 6)) > 0;   // SSE 4A
	return (flags[0] && flags[1] && flags[2]);
}
Example #18
void __data_resp_handler(void) {
  exc_prologue();
  int tmp = noc_fifo_data_read();  // drain the data word from the NoC FIFO; the value itself is unused
  done[get_cpuid()] = 1;
  // if (get_cpuid() == NOC_MASTER) {
  //   puts("Hello from interrupt handler");
  //   fflush(stdout);
  // }
  intr_clear_pending(exc_get_source());
  exc_epilogue();
}
Example #19
void noc_test_slave() {
  // Performing libnoc test...
  noc_receive();
  // for (int i = 0; i < 8; ++i) {
  //   *(NOC_SPM_BASE+i) = 0x11223344 * i;
  // }
  noc_write((unsigned)NOC_MASTER,
            (volatile void _SPM *)(NOC_SPM_BASE+(get_cpuid()*8)),
            (volatile void _SPM *)NOC_SPM_BASE,
            32, 1);

  while(done[NOC_MASTER] != 1){;}

  return;
}
Example #20
bool
AutoSystemInfo::CheckForAtom() const
{
    int CPUInfo[4];
    const int GENUINE_INTEL_0 = 0x756e6547,
              GENUINE_INTEL_1 = 0x49656e69,
              GENUINE_INTEL_2 = 0x6c65746e;
    const int PLATFORM_MASK = 0x0fff3ff0;
    const int ATOM_PLATFORM_A = 0x0106c0, /* bonnell - extended model 1c, type 0, family code 6 */
              ATOM_PLATFORM_B = 0x020660, /* lincroft - extended model 26, type 0, family code 6 */
              ATOM_PLATFORM_C = 0x020670, /* saltwell - extended model 27, type 0, family code 6 */
              ATOM_PLATFORM_D = 0x030650, /* tbd - extended model 35, type 0, family code 6 */
              ATOM_PLATFORM_E = 0x030660, /* tbd - extended model 36, type 0, family code 6 */
              ATOM_PLATFORM_F = 0x030670; /* tbd - extended model 37, type 0, family code 6 */
    int platformSignature;

    get_cpuid(CPUInfo, 0);

    // See if CPU is ATOM HW. First check if CPU is genuine Intel.
    if( CPUInfo[1]==GENUINE_INTEL_0 &&
        CPUInfo[3]==GENUINE_INTEL_1 &&
        CPUInfo[2]==GENUINE_INTEL_2)
    {
        get_cpuid(CPUInfo, 1);
        // get platform signature
        platformSignature = CPUInfo[0];
        if((( PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_A) ||
            ((PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_B) ||
            ((PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_C) ||
            ((PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_D) ||
            ((PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_E) ||
            ((PLATFORM_MASK & platformSignature) == ATOM_PLATFORM_F))
        {
            return true;
        }

    }
    return false;
}
Example #21
void noc_receive() {
  int id = get_cpuid();
  done[id] = 0;
  exc_register(18,&__data_resp_handler);
  //exc_register(19,&__data_resp_handler);
  intr_unmask_all();
  intr_enable();
  //puts("Interrupt handler setup");
  while(done[id] != 1){;}

  intr_disable();
  return;
}
Example #22
void acquire_lock(LOCK_T * lock){
    /* Write Entering true */
    /* Write Number */
    unsigned remote = lock->remote_cpuid;
    unsigned id = get_cpuid();
    lock->local_entering = 1;
    
    noc_write(remote,
              (void _SPM *)&(lock->remote_ptr->remote_entering),
              (void _SPM *)&lock->local_entering,
              sizeof(lock->local_entering),
              0);

    //#pragma loopbound min 1 max 2
    #pragma loopbound min PKT_TRANS_WAIT max PKT_TRANS_WAIT
    while(!noc_dma_done(remote));
    unsigned n = (unsigned)lock->remote_number + 1;
    lock->local_number = n;
    /* Enforce memory barrier */
    noc_write(remote,
              (void _SPM *)&(lock->remote_ptr->remote_number),
              (void _SPM *)&lock->local_number,
              sizeof(lock->local_number),
              0);

//    /* Enforce memory barrier */
    #pragma loopbound min PKT_TRANS_WAIT max PKT_TRANS_WAIT
    while(!noc_dma_done(remote)); // noc_write() also waits for the dma to be
                                // free, so no need to do it here as well

    /* Write Entering false */
    lock->local_entering = 0;
    noc_write(remote,
              (void _SPM *)&(lock->remote_ptr->remote_entering),
              (void _SPM *)&lock->local_entering,
              sizeof(lock->local_entering),
              0);

    /* Wait for remote core not to change number */
    #pragma loopbound min 1 max 2
    while(lock->remote_entering == 1);
    /* Wait to be the first in line to the bakery queue */
    unsigned m = lock->remote_number;
    #pragma loopbound min 1 max 2
    while( (m != 0) &&
            ( (m < n) || ((m == n) && ( remote < id)))) {
      m = lock->remote_number;
    }
    /* Lock is grabbed */  
    return;
}
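
No matching release is shown for this distributed lock; a hedged sketch, assuming the same LOCK_T layout and mirroring the ticket reset in Example #16 (the function name is assumed, not taken from the source), could look like this:

/* Hypothetical counterpart to acquire_lock(): publish a ticket number of 0
 * to the remote core's copy, which lets the remote side pass the bakery
 * wait loop. */
void release_lock(LOCK_T *lock) {
    unsigned remote = lock->remote_cpuid;
    lock->local_number = 0;
    noc_write(remote,
              (void _SPM *)&(lock->remote_ptr->remote_number),
              (void _SPM *)&lock->local_number,
              sizeof(lock->local_number),
              0);
    /* wait for the DMA transfer to complete before returning */
    while(!noc_dma_done(remote));
}
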
Example #23
void loop(void* arg) {
  int (*test)(int, int) = (int (*)(int, int))arg;
  int i, j;
  int res;
  //communicator_t* loc_com = &comm;
  communicator_t* loc_com = &comm_world;
  //if (test == barrier_master || test == barrier_slave ||
  //    test == broadcast_master || test == broadcast_slave) {
  //  loc_com = &comm_world;
  //}
  //communicator_t stack_com;
  //stack_com.barrier_set = loc_com->barrier_set;
  //for(int i = 0; i < loc_com->count; i++) {
  //  stack_com.addr[i] = loc_com->addr[i];
  //}
  //stack_com.count = loc_com->count;
  //stack_com.msg_size = loc_com->msg_size;
  DEBUGGER("Initial test run\n");
  /* Allow routing/cache setup ahead of time */
  test(1,1024);
  DEBUGGER("Initial test run done\n");
  for( i=0; i < num_sizes; i++){
    if (flush & FLUSH_BETWEEN_SIZES) {
      inval_dcache();
      inval_mcache();
    }
    for( j=1; j <= repeat_count; j++){
      if (flush & FLUSH_BETWEEN_REPEATS) {
        inval_dcache();
        inval_mcache();
      }
      mp_barrier(loc_com);
      //mp_barrier(&stack_com);
      res = test(iterations, sizes[i]);
      my_two_printf("%u\t%i\n",sizes[i],res);
      mp_barrier(loc_com);
      //mp_barrier(&stack_com);
    }
  }

  inval_dcache();
  inval_mcache();

  if(get_cpuid() != 0){
    int ret = 0;
    corethread_exit(&ret);
  }
  return;

}
Example #24
static void slave(void* param) {
  // clear communication areas
  // cannot use memset() for _SPM pointers!
  for(int i = 0; i < sizeof(struct msg_t); i++) {
    ((volatile _SPM char *)spm_in)[i] = 0;
    ((volatile _SPM char *)spm_out)[i] = 0;
  }

  // wait and poll until message arrives
  while(!spm_in->ready) {
    /* spin */
  }

  // PROCESS: add ID to sum_id
  spm_out->sum = spm_in->sum + get_cpuid();
  spm_out->ready = 1;

  // send to next slave
  int rcv_id = (get_cpuid()==(get_cpucnt()-1)) ? 0 : get_cpuid()+1;
  noc_write(rcv_id, spm_in, spm_out, sizeof(struct msg_t), 0);

  return;
}
Example #25
int __patmos_lock_acquire_recursive(_LOCK_RECURSIVE_T *lock) {
  const unsigned cnt = get_cpucnt();
  if (cnt > 1) {
    const unsigned char id = get_cpuid();
    _UNCACHED _LOCK_RECURSIVE_T *ll = (_UNCACHED _LOCK_RECURSIVE_T *)lock;

    if (ll->owner != id || ll->depth == 0) {
      __lock_acquire(lock->lock);
      ll->owner = id;
    }

    ll->depth++;
  }
  return 0;
}
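
The matching recursive release is not part of this listing; a hedged sketch, assuming the same _LOCK_RECURSIVE_T layout as the acquire above (the function name and the __lock_release() call are assumed to mirror the acquire side):

// Sketch only: release the underlying lock when the outermost recursive
// acquire is undone (depth drops back to 0).
int __patmos_lock_release_recursive(_LOCK_RECURSIVE_T *lock) {
  const unsigned cnt = get_cpucnt();
  if (cnt > 1) {
    _UNCACHED _LOCK_RECURSIVE_T *ll = (_UNCACHED _LOCK_RECURSIVE_T *)lock;
    if (ll->depth > 0 && --ll->depth == 0) {
      __lock_release(lock->lock);
    }
  }
  return 0;
}
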
Example #26
BrowserApp::BrowserApp()
	:
	BApplication(kApplicationSignature),
	fWindowCount(0),
	fLastWindowFrame(50, 50, 950, 750),
	fLaunchRefsMessage(0),
	fInitialized(false),
	fSettings(NULL),
	fCookies(NULL),
	fSession(NULL),
	fContext(NULL),
	fDownloadWindow(NULL),
	fSettingsWindow(NULL),
	fConsoleWindow(NULL),
	fCookieWindow(NULL)
{
#ifdef __INTEL__
	// First let's check SSE2 is available
	cpuid_info info;
	get_cpuid(&info, 1, 0);

	if ((info.eax_1.features & (1 << 26)) == 0) {
		BAlert alert(B_TRANSLATE("No SSE2 support"), B_TRANSLATE("Your CPU is "
			"too old and does not support the SSE2 extensions, without which "
			"WebPositive cannot run. We recommend installing NetSurf instead."),
			B_TRANSLATE("Darn!"));
		alert.Go();
		exit(-1);
	}
#endif

#if ENABLE_NATIVE_COOKIES
	BString cookieStorePath = kApplicationName;
	cookieStorePath << "/Cookies";
	fCookies = new SettingsMessage(B_USER_SETTINGS_DIRECTORY,
		cookieStorePath.String());
	fContext = new BUrlContext();
	if (fCookies->InitCheck() == B_OK) {
		BMessage cookieArchive = fCookies->GetValue("cookies", cookieArchive);
		fContext->SetCookieJar(BNetworkCookieJar(&cookieArchive));
	}
#endif

	BString sessionStorePath = kApplicationName;
	sessionStorePath << "/Session";
	fSession = new SettingsMessage(B_USER_SETTINGS_DIRECTORY,
		sessionStorePath.String());
}
Example #27
struct thread *
thd_alloc(struct spd *spd)
{
	struct thread *thd, *new_freelist_head;
	unsigned short int id;
	void *page;

	do {
		thd = thread_freelist_head;
		new_freelist_head = thread_freelist_head->freelist_next;
	} while (unlikely(!cos_cas((unsigned long *)&thread_freelist_head, (unsigned long)thd, (unsigned long)new_freelist_head)));

	if (thd == NULL) {
		printk("cos: Could not create thread.\n");
		return NULL;
	}

	page = cos_get_pg_pool();
	if (unlikely(NULL == page)) {
		printk("cos: Could not allocate the data page for new thread.\n");
		thread_freelist_head = thd;
		return NULL;
	}

	id = thd->thread_id;
	memset(thd, 0, sizeof(struct thread));
	thd->thread_id = id;
	thd->cpu = get_cpuid();

	thd->data_region = page;
	*(int*)page = 4; /* HACK: sizeof(struct cos_argr_placekeeper) */
	thd->ul_data_page = COS_INFO_REGION_ADDR + (PAGE_SIZE * id);
	thd_publish_data_page(thd, (vaddr_t)page);

	/* Initialization */
	thd->stack_ptr = -1;
	/* establish this thread's base spd */
	thd_invocation_push(thd, spd, 0, 0);

	thd->flags = 0;

	thd->pending_upcall_requests = 0;
	thd->freelist_next = NULL;

	fpu_thread_init(thd);

	return thd;
}
Example #28
void cpuid(t_32 id)
{
	t_64 rax=0,rbx=0,rcx=0,rdx=0;
	rax=get_cpuid(id, &rbx, &rcx, &rdx);
	disp_str("cpuid");
	disp_int(id);
	disp_str(" EAX=");
	disp_int(rax);
	disp_str(" EBX=");
	disp_int(rbx);
	disp_str(" ECX=");
	disp_int(rcx);
	disp_str(" EDX=");
	disp_int(rdx);
	disp_str("\n");
}
Example #29
void trap_init_percpu()
{
	extern struct seg_descriptor gdt[CPUNUMS + 5];
	extern int ncpu;
	uint32_t cid = get_cpuid();
	struct taskstate *pts = &(thiscpu->cpu_ts);
	//pts->ts_esp0 = KERNEL_STACKTOP - (KERNEL_STKSIZE + KERNEL_STKGAP) * cid;
	pts->ts_esp0 = KERN_STACKTOP;
	pts->ts_ss0 = _KERNEL_DS_;

	gdt[(_TSS0_ >> 3) + cid] = set_seg(STS_T32A, (uint32_t) (pts), sizeof(struct taskstate), 0);
	gdt[(_TSS0_ >> 3) + cid].s = 0;
	
	ltr(_TSS0_ + cid * sizeof(struct seg_descriptor));
	lidt(&idt_pd);
}
Example #30
const char *get_manufacturer()
{
	unsigned regs[4];
	unsigned info[3];
	int i, index = -1;
	get_cpuid(0, regs);
	info[0] = regs[REG_EBX];
	info[1] = regs[REG_EDX];
	info[2] = regs[REG_ECX];
	for (i = 0; i < 10; ++i)
		if (memcmp(info, manufacturers_hex[i], 3 * sizeof(int)) == 0)
		{
			index = i;
			break;
		}
	if (index < 0)
		return "unknown";	/* no vendor matched; avoid indexing with -1 */
	return manufacturers_str[index];
}
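
The manufacturers_hex and manufacturers_str tables are not shown here. As an illustration of their assumed layout, each row packs the EBX/EDX/ECX words of the CPUID leaf-0 vendor string, e.g. the "GenuineIntel" values that Example #20 compares against:

/* Hypothetical table layout, inferred from the lookup above. */
static const unsigned manufacturers_hex[][3] = {
	{ 0x756e6547, 0x49656e69, 0x6c65746e },	/* "Genu" "ineI" "ntel" */
	{ 0x68747541, 0x69746e65, 0x444d4163 },	/* "Auth" "enti" "cAMD" */
	/* ... further vendors elided ... */
};
static const char *manufacturers_str[] = {
	"GenuineIntel",
	"AuthenticAMD",
	/* ... */
};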