Example #1
File: entity.c Project: knz/snet-rts
static int SetThreadAffinity( pthread_t *pt, affinity_type_t at, ...)
{
  int i, res, numcpus;
  cpu_set_t cpuset;

  res = pthread_getaffinity_np( *pt, sizeof(cpu_set_t), &cpuset);
  if (res != 0) {
    return 1;
  }

  numcpus = CPU_COUNT(&cpuset);
 
  switch(at) {
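    /* Rebuild the mask according to the requested policy:
     * MASKMOD2 keeps only even-numbered CPUs, STRICTLYFIRST pins to CPU 0,
     * ALLBUTFIRST removes CPU 0, MAXCORES caps the number of usable CPUs. */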
    case MASKMOD2:
      CPU_ZERO(&cpuset);
      for( i=0; i<numcpus; i+=2) {
        CPU_SET( i, &cpuset);
      }
      break;
    case STRICTLYFIRST:
      if( numcpus > 1) {
        CPU_ZERO(&cpuset);
        CPU_SET(0, &cpuset);
      }
      break;
    case ALLBUTFIRST:
      if( numcpus > 1) {
        CPU_CLR(0, &cpuset);
      }
      break;
    case MAXCORES: {
        int j, num_cores;
        va_list args;
        va_start( args, at);
        num_cores = va_arg( args, int);
        va_end( args);
        for( j=num_cores; j<numcpus; j++) {
          CPU_CLR( j, &cpuset);
        }
      }
      break;
    default:
      break;
  }

  /* set the affinity mask */
  res = pthread_setaffinity_np( *pt, sizeof(cpu_set_t), &cpuset);
  if( res != 0) {
    return 1;
  }

  return 0;
}
Example #2
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
    int i, cpu = -1, nrcpus = 1024;
realloc:
    CPU_ZERO(maskp);

    if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
        if (errno == EINVAL && nrcpus < (1024 << 8)) {
            nrcpus = nrcpus << 2;
            goto realloc;
        }
        perror("sched_getaffinity");
        return -1;
    }

    for (i = 0; i < nrcpus; i++) {
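        /* Remember the first set CPU; clear every other CPU from the mask. */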
        if (CPU_ISSET(i, maskp)) {
            if (cpu == -1)
                cpu = i;
            else
                CPU_CLR(i, maskp);
        }
    }

    return cpu;
}
Example #3
File: setaffinity.c Project: adilbaig/unix
int main(void)
{
    cpu_set_t set;
    int ret,i;

    //Zero out the struct
    CPU_ZERO(&set);
    CPU_SET(0, &set); //allow CPU 0
    CPU_CLR(1, &set); //disallow CPU 1 (redundant here: CPU_ZERO already cleared every CPU)

    //Set the processor affinity for this process
    ret = sched_setaffinity(0, sizeof(cpu_set_t), &set);
    if(ret == -1)
        perror("sched_setaffinity");

    /*
     * CPU_SETSIZE is the number of processors that can be represented by cpu_set_t
     */
    for(i=0; i < CPU_SETSIZE; i++)
    {
        int cpu;

        cpu = CPU_ISSET(i, &set);
        if(cpu)
            printf("cpu=%i is set\n", i);
    }

    return 0;
}
Example #4
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS	status;

	if (sc->acpi_wakeaddr == 0ul)
		return (-1);	/* couldn't alloc wake memory */

#ifdef SMP
	suspcpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &suspcpus);
#endif

	if (acpi_resume_beep != 0)
		timer_spkr_acquire();

	AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));

	intr_suspend();

	if (savectx(susppcbs[0])) {
		fpususpend(suspfpusave[0]);
#ifdef SMP
		if (!CPU_EMPTY(&suspcpus) &&
		    suspend_cpus(suspcpus) == 0) {
			device_printf(sc->acpi_dev, "Failed to suspend APs\n");
			return (0);	/* couldn't sleep */
		}
#endif

		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

		WAKECODE_FIXUP(wakeup_pcb, struct pcb *, susppcbs[0]);
		WAKECODE_FIXUP(wakeup_fpusave, void *, suspfpusave[0]);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t,
		    susppcbs[0]->pcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
		    susppcbs[0]->pcb_gdt.rd_base);
		WAKECODE_FIXUP(wakeup_cpu, int, 0);

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);

		if (status != AE_OK) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			return (0);	/* couldn't sleep */
		}

		for (;;)
			ia32_pause();
	}

	return (1);	/* wakeup successfully */
}
Example #5
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /* Will panic if no rendezvousing cpus, so check up front. */
    if (RTMpGetOnlineCount() > 1)
    {
#if __FreeBSD_version >= 900000
        cpuset_t    Mask;
#elif  __FreeBSD_version >= 700000
        cpumask_t   Mask;
#endif
        RTMPARGS    Args;

        Args.pfnWorker = pfnWorker;
        Args.pvUser1 = pvUser1;
        Args.pvUser2 = pvUser2;
        Args.idCpu = RTMpCpuId();
        Args.cHits = 0;
#if __FreeBSD_version >= 700000
# if __FreeBSD_version >= 900000
        Mask = all_cpus;
        CPU_CLR(curcpu, &Mask);
# else
        Mask = ~((cpumask_t)1 << curcpu);   /* all CPUs except the current one */
# endif
        smp_rendezvous_cpus(Mask, NULL, rtmpOnOthersFreeBSDWrapper, smp_no_rendezvous_barrier, &Args);
#else
        smp_rendezvous(NULL, rtmpOnOthersFreeBSDWrapper, NULL, &Args);
#endif
    }
    return VINF_SUCCESS;
}
Example #6
/* The job has specialized cores, synchronize user mask with available cores */
static void _validate_mask(uint32_t task_id, hwloc_obj_t obj, cpu_set_t *ts)
{
	int i, j, overlaps = 0;
	bool superset = true;

	for (i = 0; i < CPU_SETSIZE; i++) {
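		/* Drop user-requested CPUs that fall outside the allowed cpuset */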
		if (!CPU_ISSET(i, ts))
			continue;
		j = hwloc_bitmap_isset(obj->allowed_cpuset, i);
		if (j > 0) {
			overlaps++;
		} else if (j == 0) {
			CPU_CLR(i, ts);
			superset = false;
		}
	}

	if (overlaps == 0) {
		/* The task's cpu map is completely invalid.
		 * Give it all allowed CPUs */
		for (i = 0; i < CPU_SETSIZE; i++) {
			if (hwloc_bitmap_isset(obj->allowed_cpuset, i) > 0)
				CPU_SET(i, ts);
		}
	}

	if (!superset) {
		info("task/cgroup: Ignoring user CPU binding outside of job "
		     "step allocation for task[%u]", task_id);
		fprintf(stderr, "Requested cpu_bind option outside of job "
			"step allocation for task[%u]\n", task_id);
	}
}
Example #7
int
main(int argc, char *argv[])
{
	int s, j, nprocs;
	cpu_set_t cpuset;
	pthread_t thread;

	thread = pthread_self();
	nprocs = sysconf(_SC_NPROCESSORS_ONLN);

	/* Start with an affinity mask that includes every online CPU */

	CPU_ZERO(&cpuset);
	for (j = 0; j < nprocs; j++)
		CPU_SET(j, &cpuset);


	/* Remove CPUs 1-5 from the mask */
	CPU_CLR(1, &cpuset);
	CPU_CLR(2, &cpuset);
	CPU_CLR(3, &cpuset);
	CPU_CLR(4, &cpuset);
	CPU_CLR(5, &cpuset);
	/* Check whether the CPUs have actually been set */
	for (j = 0; j < nprocs; j++)
		fprintf(stdout, "CPU: %d, status: %d\n", j, CPU_ISSET(j, &cpuset));

		
	s = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
	if (s != 0)
		handle_error_en(s, "pthread_setaffinity_np");

	/* Check the actual affinity mask assigned to the thread */

	s = pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
	if (s != 0)
		handle_error_en(s, "pthread_getaffinity_np");

	printf("Set returned by pthread_getaffinity_np() contained:\n");
	for (j = 0; j < CPU_SETSIZE; j++)
		if (CPU_ISSET(j, &cpuset))
			printf("    CPU %d\n", j);

	exit(EXIT_SUCCESS);
}
Example #8
static void
smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int cpu, ncpu, othercpus;
	struct _call_data data;

	othercpus = mp_ncpus - 1;
	if (CPU_ISFULLSET(&mask)) {
		if (othercpus < 1)
			return;
	} else {
		CPU_CLR(PCPU_GET(cpuid), &mask);
		if (CPU_EMPTY(&mask))
			return;
	}
	if (!(read_eflags() & PSL_I))
		panic("%s: interrupts disabled", __func__);
	mtx_lock_spin(&smp_ipi_mtx);
	KASSERT(call_data == NULL, ("call_data isn't null?!"));
	call_data = &data;		
	call_data->func_id = vector;
	call_data->arg1 = addr1;
	call_data->arg2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (CPU_ISFULLSET(&mask)) {
		ncpu = othercpus;
		ipi_all_but_self(vector);
	} else {
		ncpu = 0;
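		/* Pop CPUs off the mask one at a time; cpusetobj_ffs() returns the
		 * lowest set bit as a 1-based index, or 0 when the mask is empty. */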
		while ((cpu = cpusetobj_ffs(&mask)) != 0) {
			cpu--;
			CPU_CLR(cpu, &mask);
			CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu,
			    vector);
			ipi_send_cpu(cpu, vector);
			ncpu++;
		}
	}
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	call_data = NULL;
	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #9
File: chcpu.c Project: rseabra/util-linux
static int cpu_enable(cpu_set_t *cpu_set, size_t setsize, int enable)
{
	unsigned int cpu;
	int online, rc;
	int configured = -1;

	for (cpu = 0; cpu < setsize; cpu++) {
		if (!CPU_ISSET(cpu, cpu_set))
			continue;
		if (!path_exist(_PATH_SYS_CPU "/cpu%d", cpu)) {
			printf(_("CPU %d does not exist\n"), cpu);
			continue;
		}
		if (!path_exist(_PATH_SYS_CPU "/cpu%d/online", cpu)) {
			printf(_("CPU %d is not hot pluggable\n"), cpu);
			continue;
		}
		online = path_getnum(_PATH_SYS_CPU "/cpu%d/online", cpu);
		if ((online == 1) && (enable == 1)) {
			printf(_("CPU %d is already enabled\n"), cpu);
			continue;
		}
		if ((online == 0) && (enable == 0)) {
			printf(_("CPU %d is already disabled\n"), cpu);
			continue;
		}
		if (path_exist(_PATH_SYS_CPU "/cpu%d/configure", cpu))
			configured = path_getnum(_PATH_SYS_CPU "/cpu%d/configure", cpu);
		if (enable) {
			rc = path_writestr("1", _PATH_SYS_CPU "/cpu%d/online", cpu);
			if ((rc == -1) && (configured == 0))
				printf(_("CPU %d enable failed "
					 "(CPU is deconfigured)\n"), cpu);
			else if (rc == -1)
				printf(_("CPU %d enable failed (%m)\n"), cpu);
			else
				printf(_("CPU %d enabled\n"), cpu);
		} else {
			if (onlinecpus && num_online_cpus() == 1) {
				printf(_("CPU %d disable failed "
					 "(last enabled CPU)\n"), cpu);
				continue;
			}
			rc = path_writestr("0", _PATH_SYS_CPU "/cpu%d/online", cpu);
			if (rc == -1)
				printf(_("CPU %d disable failed (%m)\n"), cpu);
			else {
				printf(_("CPU %d disabled\n"), cpu);
				if (onlinecpus)
					CPU_CLR(cpu, onlinecpus);
			}
		}
	}
	return EXIT_SUCCESS;
}
Example #10
// Get and set the process's CPU affinity.
void affinity_control()
{
	// CPU affinity mask structure
	cpu_set_t set;

	// Macro that clears every CPU in the set
	CPU_ZERO(&set);
	// Query the affinity of the current process.
	if(sched_getaffinity(0, sizeof(cpu_set_t), &set) == -1)
		errexit("sched_getaffinity");

	// Check the current affinity values
	int i;
	// CPU_SETSIZE = 1024
	for (i = 0; i < CPU_SETSIZE; ++i) {
		int cpu;
		// Macro that tests whether CPU i is set
		cpu = CPU_ISSET(i, &set);
		printf("CPU #%d = %d\n", i, cpu);
	}
	}


	// Set the affinity
	CPU_ZERO(&set);
	// Set CPUs 0 and 1.
	CPU_SET(0, &set);
	CPU_SET(1, &set);
	// Clear CPUs 2 and 3.
	CPU_CLR(2, &set);
	CPU_CLR(3, &set);

	// Apply the values configured in 'set' as the current process's affinity.
	if(sched_setaffinity(0, sizeof(cpu_set_t), &set) == -1)
		errexit("sched_setaffinity");

	for (i = 0; i < CPU_SETSIZE; ++i) {
		int cpu;
		// Macro that tests whether CPU i is set
		cpu = CPU_ISSET(i, &set);
		printf("CPU #%d = %d\n", i, cpu);
	}
}
Example #11
mt::LinuxCPUAffinityThreadInitializer::
LinuxCPUAffinityThreadInitializer(const cpu_set_t& cpu)
{
    for (int i = 0; i < CPU_SETSIZE; ++i)
    {
	CPU_CLR(i, &mCPU);
	if (CPU_ISSET(i, &cpu))
	    CPU_SET(i, &mCPU);
    }
}
Example #12
File: omrthreadnuma.c Project: dinogun/omr
/**
 * Combines the two given bitsets, destination and source, storing their logical AND result into destination
 * @param destination[in/out] One source and a destination of the logical AND
 * @param source[in] The other source of the logical AND
 */
static void
cpuset_logical_and(cpu_set_t *destination, const cpu_set_t *source)
{
	uintptr_t i = 0;

	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, source) && CPU_ISSET(i, destination)) {
			CPU_CLR(i, destination);
		}
	}
}
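Note: on glibc, the loop above computes the same result as the stock CPU_AND macro. A minimal standalone sketch for comparison (assuming a glibc target with _GNU_SOURCE defined):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t a, b;

	CPU_ZERO(&a);
	CPU_ZERO(&b);
	CPU_SET(0, &a);
	CPU_SET(1, &a);
	CPU_SET(1, &b);
	CPU_SET(2, &b);

	/* a &= b; only CPU 1 remains set afterwards */
	CPU_AND(&a, &a, &b);

	printf("CPU 0: %d, CPU 1: %d, CPU 2: %d\n",
	       CPU_ISSET(0, &a), CPU_ISSET(1, &a), CPU_ISSET(2, &a));
	return 0;
}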
Example #13
void
kdb_panic(const char *msg)
{
#ifdef SMP
    cpuset_t other_cpus;

    other_cpus = all_cpus;
    CPU_CLR(PCPU_GET(cpuid), &other_cpus);
    stop_cpus_hard(other_cpus);
#endif
    printf("KDB: panic\n");
    panic("%s", msg);
}
Example #14
void *
start_server_thread_func (void *ptr)
{
    client_thread = false;
    client_t *client = (client_t *)ptr;
    server_t *server = server_new (&client->buffer);

    server->server_signal = &client->server_signal;
    server->client_signal = &client->client_signal;

    mutex_unlock (client->server_started_mutex);
    prctl (PR_SET_TIMERSLACK, 1);

    pthread_t id = pthread_self ();
    
    int online_cpus = sysconf (_SC_NPROCESSORS_ONLN);
    int available_cpus = sysconf (_SC_NPROCESSORS_CONF);

    if (online_cpus > 1) {
        cpu_set_t cpu_set;
        CPU_ZERO (&cpu_set);
        if (pthread_getaffinity_np (id, sizeof (cpu_set_t), &cpu_set) == 0) {

            /* find first cpu to run on */
            int cpu = 0;
            int i;
            for (i = 1; i < available_cpus; i++) {
                if (CPU_ISSET (i, &cpu_set)) {
                    cpu = i;
                    break;
                }
            }
            /* force server to run on cpu1 */
            if (cpu == 0)
                cpu = 1;
            if (cpu != 0) {
                for (i = 0; i < available_cpus; i++) {
                    if (i != cpu)
                        CPU_CLR (i, &cpu_set);
                }
                CPU_SET (cpu, &cpu_set);
                pthread_setaffinity_np (id, sizeof (cpu_set_t), &cpu_set);
            }
        }
    }

    server_start_work_loop (server);

    server_destroy(server);
    return NULL;
}
Example #15
void
cpu_reset()
{
#ifdef SMP
	cpuset_t map;
	u_int cnt;

	if (smp_started) {
		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);
		CPU_NAND(&map, &stopped_cpus);
		if (!CPU_EMPTY(&map)) {
			printf("cpu_reset: Stopping other CPUs\n");
			stop_cpus(map);
		}

		if (PCPU_GET(cpuid) != 0) {
			cpu_reset_proxyid = PCPU_GET(cpuid);
			cpustop_restartfunc = cpu_reset_proxy;
			cpu_reset_proxy_active = 0;
			printf("cpu_reset: Restarting BSP\n");

			/* Restart CPU #0. */
			CPU_SETOF(0, &started_cpus);
			wmb();

			cnt = 0;
			while (cpu_reset_proxy_active == 0 && cnt < 10000000) {
				ia32_pause();
				cnt++;	/* Wait for BSP to announce restart */
			}
			if (cpu_reset_proxy_active == 0)
				printf("cpu_reset: Failed to restart BSP\n");
			enable_intr();
			cpu_reset_proxy_active = 2;

			while (1)
				ia32_pause();
			/* NOTREACHED */
		}

		DELAY(1000000);
	}
#endif
	cpu_reset_real();
	/* NOTREACHED */
}
Example #16
/* The job has specialized cores, synchronize user mask with available cores */
static void _validate_mask(launch_tasks_request_msg_t *req, char *avail_mask)
{
	char *new_mask = NULL, *save_ptr = NULL, *tok;
	cpu_set_t avail_cpus, task_cpus;
	bool superset = true;

	CPU_ZERO(&avail_cpus);
	(void) str_to_cpuset(&avail_cpus, avail_mask);
	tok = strtok_r(req->cpu_bind, ",", &save_ptr);
	while (tok) {
		int i, overlaps = 0;
		char mask_str[1 + CPU_SETSIZE / 4];
		CPU_ZERO(&task_cpus);
		(void) str_to_cpuset(&task_cpus, tok);
		for (i = 0; i < CPU_SETSIZE; i++) {
			if (!CPU_ISSET(i, &task_cpus))
				continue;
			if (CPU_ISSET(i, &avail_cpus)) {
				overlaps++;
			} else {
				CPU_CLR(i, &task_cpus);
				superset = false;
			}
		}
		if (overlaps == 0) {
			/* The task's CPU mask is completely invalid.
			 * Give it all allowed CPUs. */
			for (i = 0; i < CPU_SETSIZE; i++) {
				if (CPU_ISSET(i, &avail_cpus))
					CPU_SET(i, &task_cpus);
			}
		}
		cpuset_to_str(&task_cpus, mask_str);
		if (new_mask)
			xstrcat(new_mask, ",");
		xstrcat(new_mask, mask_str);
		tok = strtok_r(NULL, ",", &save_ptr);
	}

	if (!superset) {
		info("task/affinity: Ignoring user CPU binding outside of job "
		     "step allocation");
	}

	xfree(req->cpu_bind);
	req->cpu_bind = new_mask;
}
Example #17
int main(int argc, char *argv[])
{
	QCoreApplication app(argc, argv);
	qRegisterMetaType< struct timespec >();


	if( Tuning::DoMemLock ) {
		// Prevent this process from being swapped out (technically this is
		// moot since we don't use a swapfile on the Pi)
		mlockall(MCL_CURRENT | MCL_FUTURE);
	}

	if( Tuning::DoRTLimit ){
		struct rlimit rlimits;
		getrlimit(RLIMIT_RTTIME, &rlimits );
		qDebug() << "Current RLIMIT_RTTIME = " << rlimits.rlim_cur;
		rlimits.rlim_cur = Tuning::RTLimit;
		if( setrlimit(RLIMIT_RTTIME, &rlimits ) ) {
			qDebug() << "Error setting RLIMIT_RTTIME (" << errno << "): " << strerror(errno);
		}
	}

	// Keep this process off the critical CPU by clearing it from the affinity mask
	if( Tuning::DoCpuAffinity ) {
		cpu_set_t cpu_set;
		if( sched_getaffinity(0, sizeof(cpu_set_t), &cpu_set ) != 0 ) {
			qDebug() << "Unable to get CPU affinity for main thread (" << errno << "): " << strerror(errno);
		}

		CPU_CLR( Tuning::CriticalCpu, &cpu_set );

		if( sched_setaffinity(0, sizeof(cpu_set_t), &cpu_set ) != 0 ) {
			qDebug() << "Unable to set CPU affinity for main thread (" << errno << "): " << strerror(errno);
		}

		sched_getaffinity(0, sizeof(cpu_set_t), &cpu_set );
		qDebug() << "Main thread runs on " << (CPU_COUNT( &cpu_set )) << " cpus";
	}

	setScheduler( Tuning::DoFIFOScheduler, false );

	MainThread *mt = new MainThread( &app );

	return app.exec();
}
Example #18
int set_cpu_affinity(const char *str, int inverted)
{
    int ret, i, cpus;
    const char *p, *q;
    cpu_set_t cpu_bitmask;
    q = str;
    cpus = get_number_cpus();
    CPU_ZERO(&cpu_bitmask);
    for (i = 0; inverted && i < cpus; ++i)
        CPU_SET(i, &cpu_bitmask);
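    /* Parse a comma-separated list of CPU ranges: "a", "a-b", or "a-b:stride" */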
    while (p = q, q = next_token(q, ','), p) {
        unsigned int a;	 /* Beginning of range */
        unsigned int b;	 /* End of range */
        unsigned int s;	 /* Stride */
        const char *c1, *c2;
        if (sscanf(p, "%u", &a) < 1)
            return -EINVAL;
        b = a;
        s = 1;
        c1 = next_token(p, '-');
        c2 = next_token(p, ',');
        if (c1 != NULL && (c2 == NULL || c1 < c2)) {
            if (sscanf(c1, "%u", &b) < 1)
                return -EINVAL;
            c1 = next_token(c1, ':');
            if (c1 != NULL && (c2 == NULL || c1 < c2))
                if (sscanf(c1, "%u", &s) < 1)
                    return -EINVAL;
        }
        if (!(a <= b))
            return -EINVAL;
        while (a <= b) {
            if (inverted)
                CPU_CLR(a, &cpu_bitmask);
            else
                CPU_SET(a, &cpu_bitmask);
            a += s;
        }
    }
    ret = sched_setaffinity(getpid(), sizeof(cpu_bitmask),
                            &cpu_bitmask);
    if (ret)
        panic("Can't set this cpu affinity!\n");
    return 0;
}
Example #19
static int
do_test (void)
{
  cpu_set_t cs;
  if (sched_getaffinity (getpid (), sizeof (cs), &cs) != 0)
    {
      printf ("getaffinity failed: %m\n");
      return 1;
    }

  int result = 0;
  int cpu = 0;
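  /* Pin the process to each allowed CPU in turn and check that
     sched_getcpu() reports that same CPU.  */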
  while (CPU_COUNT (&cs) != 0)
    {
      if (CPU_ISSET (cpu, &cs))
	{
	  cpu_set_t cs2;
	  CPU_ZERO (&cs2);
	  CPU_SET (cpu, &cs2);
	  if (sched_setaffinity (getpid (), sizeof (cs2), &cs2) != 0)
	    {
	      printf ("setaffinity(%d) failed: %m\n", cpu);
	      result = 1;
	    }
	  else
	    {
	      int cpu2 = sched_getcpu ();
	      if (cpu2 == -1 && errno == ENOSYS)
		{
		  puts ("getcpu syscall not implemented");
		  return 0;
		}
	      if (cpu2 != cpu)
		{
		  printf ("getcpu results %d not possible\n", cpu2);
		  result = 1;
		}
	    }
	  CPU_CLR (cpu, &cs);
	}
      ++cpu;
    }

  return result;
}
Example #20
/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus);

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	ipi_selected(other_cpus, ipi);
}
Example #21
File: init.c Project: cloud-hot/rtems
static void test_cpu_clr_case_1(size_t cpu)
{
  size_t i;

  /*
   * Fill the set, clear one CPU, and verify
   */
  printf( "Exercise CPU_FILL, CPU_CLR(%u), and CPU_ISSET\n", cpu );
  CPU_FILL(&set1);
  CPU_CLR(cpu, &set1);

  /* test that all bits except 'cpu' are set */
  for (i=0 ; i<CPU_SETSIZE ; i++) {
    if (i==cpu)
      rtems_test_assert( CPU_ISSET(i, &set1) == 0 );
    else
      rtems_test_assert( CPU_ISSET(i, &set1) == 1 );
  }
}
Example #22
/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(cpuset_t cpus, u_int ipi)
{
	int cpu;

	/*
	 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit
	 * of help in order to understand what is the source.
	 * Set the mask of receiving CPUs for this purpose.
	 */
	if (ipi == IPI_STOP_HARD)
		CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus);

	while ((cpu = cpusetobj_ffs(&cpus)) != 0) {
		cpu--;
		CPU_CLR(cpu, &cpus);
		CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi);
		ipi_send_cpu(cpu, ipi);
	}
}
Example #23
File: affset.c Project: felix021/mycodes
int main(int argc, char *argv[])
{
    pid_t pid;
    if (argc < 3) {
        printf("%s <PID> <CPU_AFFINITY>\ne.g. %s 9527 0110\n", argv[0], argv[0]);
        return 0;
    }

    sscanf(argv[1], "%d", &pid);

    int i, cpu_num = get_cpu_num();
    if (cpu_num < 0) {
        printf("can't get cpu number\n");
        return 1;
    }

    cpu_set_t mask;
    unsigned int len = sizeof(mask);
    CPU_ZERO(&mask);
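    /* Translate the "0110"-style argument into a CPU mask, one bit per character */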
    for (i = 0; i < cpu_num && argv[2][i] != '\0'; i++) {
        if (argv[2][i] == '1') {
            CPU_SET(i, &mask);
        }
        else if (argv[2][i] == '0') {
            CPU_CLR(i, &mask);
        }
        else {
            printf("bad cpu_affinity, only 0/1 is allowed\n");
            return 2;
        }
    }

    if (sched_setaffinity(pid, len, &mask) < 0) {
        perror("sched_setaffinity");
        return -1;
    }
    
    printf("PID[%d] set to [%s] ok\n", pid, argv[2]);

    return 0;
}
Example #24
int main( int argc, char *argv[])
{
	cpu_set_t set;
	int ret, i;

	CPU_ZERO(&set);
	ret = sched_getaffinity(0, sizeof(cpu_set_t), &set);
	if( ret == -1)
		perror("sched_getaffinity");

	print_status(&set);

	printf("Now, let's try to unset CPU #1\n");
	CPU_CLR(1, &set);
	ret = sched_setaffinity(0, sizeof(cpu_set_t), &set);
	if( ret == -1)
		perror("sched_setaffinity");

	print_status(&set);

	return 0;
}
Example #25
static void PtyReader_28979140(PtyReader_28979140_Arg* arg) {
  arg->finished = false;
  cpu_set_t cpus;
  ASSERT_EQ(0, sched_getaffinity(0, sizeof(cpu_set_t), &cpus));
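  // Move this thread off the main thread's CPU so the reader and writer
  // run concurrently on different cores.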
  CPU_CLR(arg->main_cpu_id, &cpus);
  ASSERT_EQ(0, sched_setaffinity(0, sizeof(cpu_set_t), &cpus));

  uint32_t counter = 0;
  while (counter <= arg->data_count) {
    char buf[4096];  // Use big buffer to read to hit the bug more easily.
    size_t to_read = std::min(sizeof(buf), (arg->data_count + 1 - counter) * sizeof(uint32_t));
    ASSERT_TRUE(android::base::ReadFully(arg->slave_fd, buf, to_read));
    size_t num_of_value = to_read / sizeof(uint32_t);
    uint32_t* p = reinterpret_cast<uint32_t*>(buf);
    while (num_of_value-- > 0) {
      if (*p++ != counter++) {
        arg->matched = false;
      }
    }
  }
  close(arg->slave_fd);
  arg->finished = true;
}
Example #26
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
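Note: the XXX comment above refers to glibc's dynamically sized CPU sets. A minimal sketch of the CPU_ALLOC/CPU_*_S family (assuming glibc 2.7 or later), which lifts the fixed CPU_SETSIZE limit:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int ncpus = 2048;	/* more CPUs than a fixed cpu_set_t can hold */
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t size = CPU_ALLOC_SIZE(ncpus);

	if (set == NULL)
		return 1;

	CPU_ZERO_S(size, set);
	CPU_SET_S(1234, size, set);	/* a bit beyond CPU_SETSIZE (1024) */
	printf("CPU 1234: %d, count: %d\n",
	       CPU_ISSET_S(1234, size, set), CPU_COUNT_S(size, set));
	CPU_CLR_S(1234, size, set);

	CPU_FREE(set);
	return 0;
}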
Example #27
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
static int stress_tlb_shootdown(const args_t *args)
{
	const size_t page_size = args->page_size;
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];
	cpu_set_t proc_mask_initial;

	if (sched_getaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial) < 0) {
		pr_fail_err("could not get CPU affinity");
		return EXIT_FAILURE;
	}

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		CPU_ZERO(&proc_mask);
		CPU_OR(&proc_mask, &proc_mask_initial, &proc_mask);

		tlb_procs = max_cpus;
		if (tlb_procs > MAX_TLB_PROCS)
			tlb_procs = MAX_TLB_PROCS;
		if (tlb_procs < MIN_TLB_PROCS)
			tlb_procs = MIN_TLB_PROCS;

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) ||
				    (errno == ENOMEM) ||
				    (errno == ENFILE)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail_err("mmap");
				}
			} else {
				break;
			}
		}
		(void)memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				(void)setpgid(0, g_pgrp);
				stress_parent_died_alarm();

				/* Make sure this is killable by OOM killer */
				set_oom_adjustment(args->name, true);

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(args->pid, sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					(void)memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status, ret;

				ret = shim_waitpid(pids[i], &status, 0);
				if ((ret < 0) && (errno == EINTR)) {
					int j;

					/*
					 * We got interrupted, so assume
					 * it was the alarm (timedout) or
					 * SIGINT so force terminate
					 */
					for (j = i; j < tlb_procs; j++) {
						if (pids[j] != -1)
							(void)kill(pids[j], SIGKILL);
					}

					/* re-wait on the failed wait */
					(void)shim_waitpid(pids[i], &status, 0);

					/* and continue waitpid on the pids */
				}
			}
		}
		(void)munmap(mem, mmap_size);
		(void)sched_setaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial);
		inc_counter(args);
	} while (keep_stressing());

	return EXIT_SUCCESS;
}
Example #28
/* The main CPU accumulator thread */
void guppi_accum_thread(void *_args) {

    float **accumulator;      //indexed accumulator[accum_id][chan][subband][stokes]
    char accum_dirty[NUM_SW_STATES];
    struct sdfits_data_columns data_cols[NUM_SW_STATES];
    int payload_type;
    int i, j, k, rv;

    /* Get arguments */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Set cpu affinity: start from the current mask, then move off CPU 13 onto CPU 9 */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    cpuset = cpuset_orig;   /* was left uninitialized before the CPU_CLR/CPU_SET below */
    CPU_CLR(13, &cpuset);
    CPU_SET(9, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        guppi_error("guppi_accum_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_accum_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Read in general parameters */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    char errmsg[256];
    if (db_in==NULL) {
        sprintf(errmsg,
                "Error attaching to input databuf(%d) shared memory.", 
                args->input_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);
    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        sprintf(errmsg,
                "Error attaching to output databuf(%d) shared memory.", 
                args->output_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Determine high/low bandwidth mode */
    char bw_mode[16];
    if (hgets(st.buf, "BW_MODE", 16, bw_mode))
    {
        if(strncmp(bw_mode, "high", 4) == 0)
            payload_type = INT_PAYLOAD;
        else if(strncmp(bw_mode, "low", 3) == 0)
            payload_type = FLOAT_PAYLOAD;
        else
            guppi_error("guppi_net_thread", "Unsupported bandwidth mode");
    }
    else
        guppi_error("guppi_net_thread", "BW_MODE not set");

    /* Read nchan and nsubband from status shared memory */
    guppi_read_obs_params(st.buf, &gp, &sf);

    /* Allocate memory for vector accumulators */
    create_accumulators(&accumulator, sf.hdr.nchan, sf.hdr.nsubband);
    pthread_cleanup_push((void *)destroy_accumulators, accumulator);

    /* Clear the vector accumulators */
    for(i = 0; i < NUM_SW_STATES; i++) accum_dirty[i] = 1;
    reset_accumulators(accumulator, data_cols, accum_dirty, sf.hdr.nsubband, sf.hdr.nchan);

    /* Loop */
    int curblock_in=0, curblock_out=0;
    int first=1;
    float reqd_exposure=0;
    double accum_time=0;
    int integ_num = 0;
    float pfb_rate;
    int heap, accumid, struct_offset, array_offset;
    char *hdr_in=NULL, *hdr_out=NULL;
    struct databuf_index *index_in, *index_out;

    int nblock_int=0, npacket=0, n_pkt_drop=0, n_heap_drop=0;

    signal(SIGINT,cc);
    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note waiting status and current block*/
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "accumulating");
        hputi4(st.buf, "ACCBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        /* Read param struct for this block */
        hdr_in = guppi_databuf_header(db_in, curblock_in);
        if (first) 
            guppi_read_obs_params(hdr_in, &gp, &sf);
        else
            guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Do any first time stuff: first time code runs, not first time process this block */
        if (first) {

            /* Set up first output header. This header is copied from block to block
               each time a new block is created */
            hdr_out = guppi_databuf_header(db_out, curblock_out);
            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                    GUPPI_STATUS_SIZE);

            /* Read required exposure and PFB rate from status shared memory */
            reqd_exposure = sf.data_columns.exposure;
            pfb_rate = sf.hdr.efsampfr / (2 * sf.hdr.nchan);

            /* Initialise the index in the output block */
            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
            index_out->num_datasets = 0;
            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;

            first=0;
        }

        /* Loop through each spectrum (heap) in input buffer */
        index_in = (struct databuf_index*)guppi_databuf_index(db_in, curblock_in);

        for(heap = 0; heap < index_in->num_heaps; heap++)
        {
            /* If invalid, record it and move to next heap */
            if(!index_in->cpu_gpu_buf[heap].heap_valid)
            {
                n_heap_drop++;
                continue;
            }

            /* Read in heap from buffer */
            char* heap_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * heap);
            struct freq_spead_heap* freq_heap = (struct freq_spead_heap*)(heap_addr);

            char* payload_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * MAX_HEAPS_PER_BLK +
                                (index_in->heap_size - sizeof(struct freq_spead_heap)) * heap );
            int *i_payload = (int*)(payload_addr);
            float *f_payload = (float*)(payload_addr);

            accumid = freq_heap->status_bits & 0x7;         

            /*Debug: print heap */
/*            printf("%d, %d, %d, %d, %d, %d\n", freq_heap->time_cntr, freq_heap->spectrum_cntr,
                freq_heap->integ_size, freq_heap->mode, freq_heap->status_bits,
                freq_heap->payload_data_off);
*/

            /* If we have accumulated for long enough, write vectors to output block */
            if(accum_time >= reqd_exposure)
            {
                for(i = 0; i < NUM_SW_STATES; i++)
                {
                    /*If a particular accumulator is dirty, write it to output buffer */
                    if(accum_dirty[i])
                    {
                        /*If insufficient space, first mark block as filled and request new block*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if( (index_out->num_datasets+1) *
                            (index_out->array_size + sizeof(struct sdfits_data_columns)) > 
                            db_out->block_size)
                        {
                            printf("Accumulator finished with output block %d\n", curblock_out);

                            /* Write block number to status buffer */
                            guppi_status_lock_safe(&st);
                            hputi4(st.buf, "ACCBLKOU", curblock_out);
                            guppi_status_unlock_safe(&st);

                            /* Update packet count and loss fields in output header */
                            hputi4(hdr_out, "NBLOCK", nblock_int);
                            hputi4(hdr_out, "NPKT", npacket);
                            hputi4(hdr_out, "NPKTDROP", n_pkt_drop);
                            hputi4(hdr_out, "NHPDROP", n_heap_drop);

                            /* Close out current integration */
                            guppi_databuf_set_filled(db_out, curblock_out);

                            /* Wait for next output buf */
                            curblock_out = (curblock_out + 1) % db_out->n_block;
                            guppi_databuf_wait_free(db_out, curblock_out);

                            while ((rv=guppi_databuf_wait_free(db_out, curblock_out)) != GUPPI_OK)
                            {
                                if (rv==GUPPI_TIMEOUT) {
                                    guppi_warn("guppi_accum_thread", "timeout while waiting for output block");
                                    continue;
                                } else {
                                    guppi_error("guppi_accum_thread", "error waiting for free databuf");
                                    run=0;
                                    pthread_exit(NULL);
                                    break;
                                }
                            }

                            hdr_out = guppi_databuf_header(db_out, curblock_out);
                            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                                    GUPPI_STATUS_SIZE);

                            /* Initialise the index in new output block */
                            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
                            index_out->num_datasets = 0;
                            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;
                            
                            nblock_int=0;
                            npacket=0;
                            n_pkt_drop=0;
                            n_heap_drop=0;
                        }            

                        /*Update index for output buffer*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if(index_out->num_datasets == 0)
                            struct_offset = 0;
                        else
                            struct_offset = index_out->disk_buf[index_out->num_datasets-1].array_offset +
                                            index_out->array_size;

                        array_offset =  struct_offset + sizeof(struct sdfits_data_columns);
                        index_out->disk_buf[index_out->num_datasets].struct_offset = struct_offset;
                        index_out->disk_buf[index_out->num_datasets].array_offset = array_offset;

                        /*Copy sdfits_data_columns struct to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + struct_offset,
                                &data_cols[i], sizeof(struct sdfits_data_columns));

                        /*Copy data array to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + array_offset,
                                accumulator[i], index_out->array_size);
                        
                        /*Update SDFITS data_columns pointer to data array */
                        ((struct sdfits_data_columns*)
                        (guppi_databuf_data(db_out, curblock_out) + struct_offset))->data = 
                        (unsigned char*)(guppi_databuf_data(db_out, curblock_out) + array_offset);

                        index_out->num_datasets = index_out->num_datasets + 1;
                    }
                
                }

                accum_time = 0;
                integ_num += 1;

                reset_accumulators(accumulator, data_cols, accum_dirty,
                                sf.hdr.nsubband, sf.hdr.nchan);
            }

            /* Only add spectrum to accumulator if blanking bit is low */
            if((freq_heap->status_bits & 0x08) == 0)
            {
                /* Fill in data columns header fields */
                if(!accum_dirty[accumid])
                {
                    /*Record SPEAD header fields*/
                    data_cols[accumid].time = index_in->cpu_gpu_buf[heap].heap_rcvd_mjd;
                    data_cols[accumid].time_counter = freq_heap->time_cntr;
                    data_cols[accumid].integ_num = integ_num;
                    data_cols[accumid].sttspec = freq_heap->spectrum_cntr;
                    data_cols[accumid].accumid = accumid;

                    /* Fill in rest of fields from status buffer */
                    strcpy(data_cols[accumid].object, sf.data_columns.object);
                    data_cols[accumid].azimuth = sf.data_columns.azimuth;
                    data_cols[accumid].elevation = sf.data_columns.elevation;
                    data_cols[accumid].bmaj = sf.data_columns.bmaj;
                    data_cols[accumid].bmin = sf.data_columns.bmin;
                    data_cols[accumid].bpa = sf.data_columns.bpa;
                    data_cols[accumid].centre_freq_idx = sf.data_columns.centre_freq_idx;
                    data_cols[accumid].ra = sf.data_columns.ra;
                    data_cols[accumid].dec = sf.data_columns.dec;
                    data_cols[accumid].exposure = 0.0;

                    for(i = 0; i < NUM_SW_STATES; i++)
                        data_cols[accumid].centre_freq[i] = sf.data_columns.centre_freq[i];

                    accum_dirty[accumid] = 1;
                }

                data_cols[accumid].exposure += (float)(freq_heap->integ_size)/pfb_rate;
                data_cols[accumid].stpspec = freq_heap->spectrum_cntr;

                /* Add spectrum to appropriate vector accumulator (high-bw mode) */
                if(payload_type == INT_PAYLOAD)
                {
                    for(i = 0; i < sf.hdr.nchan; i++)
                    {
                        for(j = 0; j < sf.hdr.nsubband; j++)
                        {
                            for(k = 0; k < NUM_STOKES; k++)
                            {
                                accumulator[accumid]
                                           [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    (float)i_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }

                /* Add spectrum to appropriate vector accumulator (low-bw mode) */
                else
                {
                    for(i = 0; i < sf.hdr.nchan; i++)
                    {
                        for(j = 0; j < sf.hdr.nsubband; j++)
                        {
                            for(k = 0; k < NUM_STOKES; k++)
                            {
                                accumulator[accumid]
                                           [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    f_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }

            }
            
            accum_time += (double)freq_heap->integ_size / pfb_rate;
        }

        /* Update packet count and loss fields from input header */
        nblock_int++;
        npacket += gp.num_pkts_rcvd;
        n_pkt_drop += gp.num_pkts_dropped;

        /* Done with current input block */
        guppi_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes set_finished */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes ? */
    pthread_cleanup_pop(0); /* Closes destroy_accumulators */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
}
Example #29
File: subr_smp.c Project: AhmadTux/freebsd
void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *), 
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
Example #30
static void
xctrl_suspend()
{
#ifdef SMP
	cpuset_t cpu_suspend_map;
#endif
	int suspend_cancelled;

	EVENTHANDLER_INVOKE(power_suspend);

	if (smp_started) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
	}
	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));

	/*
	 * Clear our XenStore node so the toolstack knows we are
	 * responding to the suspend request.
	 */
	xs_write(XST_NIL, "control", "shutdown", "");

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
		return;
	}
	mtx_unlock(&Giant);

#ifdef SMP
	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
	if (smp_started) {
		/*
		 * Suspend other CPUs. This prevents IPIs while we
		 * are resuming, and will allow us to reset per-cpu
		 * vcpu_info on resume.
		 */
		cpu_suspend_map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
		if (!CPU_EMPTY(&cpu_suspend_map))
			suspend_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * Prevent any races with evtchn_interrupt() handler.
	 */
	disable_intr();
	intr_suspend();
	xen_hvm_suspend();

	suspend_cancelled = HYPERVISOR_suspend(0);

	xen_hvm_resume(suspend_cancelled != 0);
	intr_resume(suspend_cancelled != 0);
	enable_intr();

	/*
	 * Reset grant table info.
	 */
	gnttab_resume(NULL);

#ifdef SMP
	/* Send an IPI_BITMAP in case there are pending bitmap IPIs. */
	lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL);
	if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
		/*
		 * Now that event channels have been initialized,
		 * resume CPUs.
		 */
		resume_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or
	 * similar.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

	if (smp_started) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}

	EVENTHANDLER_INVOKE(power_resume);

	if (bootverbose)
		printf("System resumed after suspension\n");

}