Code example #1
File: cpu.c  Project: AlainODea/illumos-gate
int
fmd_fmri_unusable(nvlist_t *nvl)
{
	int rc, err = 0;
	uint8_t version;
	uint32_t cpuid;
	topo_hdl_t *thp;

	if (nvlist_lookup_uint8(nvl, FM_VERSION, &version) != 0 ||
	    version > FM_CPU_SCHEME_VERSION ||
	    nvlist_lookup_uint32(nvl, FM_FMRI_CPU_ID, &cpuid) != 0)
		return (fmd_fmri_set_errno(EINVAL));

	/*
	 * If the cpu-scheme topology exports an unusable() method, invoke it.
	 */
	if ((thp = fmd_fmri_topo_hold(TOPO_VERSION)) == NULL)
		return (fmd_fmri_set_errno(EINVAL));
	rc = topo_fmri_unusable(thp, nvl, &err);
	fmd_fmri_topo_rele(thp);
	if (err != ETOPO_METHOD_NOTSUP)
		return (rc);

	return (p_online(cpuid, P_STATUS) == P_FAULTED);
}
Code example #2
File: cpu.c  Project: GNOME/libgtop
void
glibtop_get_cpu_s (glibtop *server, glibtop_cpu *buf)
{
    kstat_ctl_t * const kc = server->machine->kc;
    cpu_stat_t cpu_stat;
    processorid_t cpu;
    int ncpu, found;

    memset (buf, 0, sizeof (glibtop_cpu));

    buf->frequency = server->machine->ticks;
    buf->flags = _glibtop_sysdeps_cpu_freq;

    if(!kc)
	return;

    switch(kstat_chain_update(kc))
    {
	case -1: assert(0); /* Debugging purposes, shouldn't happen */
	case 0:  break;
	default: glibtop_get_kstats(server);
    }

    ncpu = MIN(GLIBTOP_NCPU, server->ncpu);

    for (cpu = 0, found = 0; cpu < GLIBTOP_NCPU && found != ncpu; cpu++)
    {
	kstat_t * const ksp = server->machine->cpu_stat_kstat [cpu];
	if (!ksp) continue;

	++found;

	if(p_online(cpu, P_STATUS) == P_ONLINE)
	    buf->xcpu_flags |= (1L << cpu);
	else
	    continue;

	if (kstat_read (kc, ksp, &cpu_stat) == -1) {
	    glibtop_warn_io_r (server, "kstat_read (cpu_stat%d)", cpu);
	    continue;
	}

	buf->xcpu_idle [cpu] = cpu_stat.cpu_sysinfo.cpu [CPU_IDLE];
	buf->xcpu_user [cpu] = cpu_stat.cpu_sysinfo.cpu [CPU_USER];
	buf->xcpu_sys [cpu] = cpu_stat.cpu_sysinfo.cpu [CPU_KERNEL];
	buf->xcpu_total [cpu] = buf->xcpu_idle [cpu] + buf->xcpu_user [cpu] +
	    buf->xcpu_sys [cpu];

	buf->idle += cpu_stat.cpu_sysinfo.cpu [CPU_IDLE];
	buf->user += cpu_stat.cpu_sysinfo.cpu [CPU_USER];
	buf->sys  += cpu_stat.cpu_sysinfo.cpu [CPU_KERNEL];
    }

    if(!found)
	return;

    buf->total = buf->idle + buf->user + buf->sys;
    buf->flags = _glibtop_sysdeps_cpu_all;
}
Code example #3
File: ThreadPool.cpp  Project: hksonngan/framewave
U32 SysCoreCount()
{
    U32 count = 0;
    processorid_t i;
    for (i = 0; i <= MAX_CORES; i++)
        if (p_online(i, P_STATUS) == P_ONLINE)
            count++;

    return MAX((U32)1,count);
}
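SysCoreCount() above walks ids up to a project-defined MAX_CORES and ignores errno. A minimal sketch of the same counting pattern, bounded by sysconf(_SC_CPUID_MAX) and skipping ids that do not exist (p_online() fails with errno EINVAL), as several of the other examples on this page do; the helper name and the choice to also count P_NOINTR CPUs are assumptions, not framewave code:

#include <sys/processor.h>
#include <unistd.h>
#include <errno.h>

/* Sketch only: count online CPUs on Solaris, returning at least 1. */
static unsigned count_online_cpus(void)
{
    unsigned count = 0;
    processorid_t id;
    processorid_t max_id = (processorid_t)sysconf(_SC_CPUID_MAX);

    for (id = 0; id <= max_id; id++) {
        int status = p_online(id, P_STATUS);
        if (status == -1 && errno == EINVAL)
            continue;                   /* no CPU with this id */
        if (status == P_ONLINE || status == P_NOINTR)
            count++;                    /* counts interrupt-disabled CPUs too */
    }
    return count > 0 ? count : 1;
}
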
Code example #4
File: ProcessorMap.cpp  Project: dfyer/joins
ProcessorMap::ProcessorMap() { 
  m_nProcs = 0;
  m_p_nProcessor_Ids = NULL;

  m_nProcs = DetermineNumberOfProcessors();
  if( m_nProcs <= 0 ) {
#ifdef OS_SOLARIS
    fatal("sysconf() reports %i processors online.\n", m_nProcs );
#endif
#ifdef OS_LINUX
    fatal("sched_getaffinity() reports empty processor mask.\n");
#endif
  }

  m_p_nProcessor_Ids = new int[m_nProcs];
  if(m_p_nProcessor_Ids == NULL ) {
    fatal("new int[%i] returned NULL -- out of memory?\n", m_nProcs );
  }

  int i;
  int n = 0;

#ifdef OS_SOLARIS
  int status;
  for(i=0;n<m_nProcs && i<4096 ;i++) {
    status = p_online(i,P_STATUS);
    if(status==-1 && errno==EINVAL) continue;
    
    m_p_nProcessor_Ids[n] = i;
    n++;
  }

#endif
#ifdef OS_LINUX
  cpu_set_t cpus;

  // Returns number of processors available to process (based on affinity mask)
  if( sched_getaffinity(0, sizeof(cpus), (cpu_set_t*) &cpus) < 0) {
    fatal("sched_getaffinity() reports empty processor mask.\n" );
  }

  for (i = 0; n<m_nProcs && i < sizeof(cpus)*8; i++) {
    if( CPU_ISSET( i, &cpus ) ) {
      m_p_nProcessor_Ids[n] = i;
      n++;
    }
  }

#endif

  if( n != m_nProcs ) {
    fatal("Unable to find all processor numbers.\n" );
  }

} 
Code example #5
File: cpu_set.cpp  Project: glycerine/shore-mt
/**
 *  @brief Initialize the CPU set for Solaris by probing p_online().
 *
 *  @param cpu_set The CPU set.
 *
 *  @throw BadAlloc or ThreadException on failure.
 */
static void cpu_set_init_Solaris(cpu_set_p cpu_set)
{

  int i;
  int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);


  /* allocate cpus */
  cpu_t cpus =
    (cpu_t)malloc( num_cpus * sizeof(struct cpu_s) );
  if ( cpus == NULL )
      THROW(BadAlloc);
      
  for (i = 0; i < num_cpus; i++)
  {
    /* initialize fields */
    memset( &cpus[i], 0, sizeof(struct cpu_s) );
  }


  /* find the CPUs on the system */
  int num_found = 0;
  int cpu_num;
  for (cpu_num = 0; ; cpu_num++)
  {
    int status = p_online(cpu_num, P_STATUS);
    if ( (status == -1) && (errno == EINVAL) )
      continue;
    
    /* found a new CPU */
    cpus[num_found].cpu_unique_id = cpu_num;
    cpus[num_found].cpu_id = cpu_num;
    if ( processor_info( cpu_num, &cpus[num_found].cpu_proc_info ) ) {
        free(cpus);
        THROW4(ThreadException,
                        "processor_info() failed with %s on CPU %d/%d\n",
                        errno_to_str().data(),
                        cpu_num+1,
                        num_cpus);
    }

    num_found++;
    if ( num_found == num_cpus )
      break;
  }


  /* return parameters */
  cpu_set->cpuset_num_cpus = num_cpus;
  cpu_set->cpuset_cpus     = cpus;
}
Code example #6
File: processor.c  Project: HugoGuiroux/phoenix
/* Test whether processor CPU_ID is available. */
bool proc_is_available (int cpu_id)
{
#ifdef _LINUX_
    int ret;
    cpu_set_t cpu_set;
    
    ret = sched_getaffinity (0, sizeof (cpu_set), &cpu_set);
    if (ret < 0) return false;

    return CPU_ISSET (cpu_id, &cpu_set) ? true : false;
#elif defined (_SOLARIS_)
    return (p_online (cpu_id, P_STATUS) == P_ONLINE);
#endif
}
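A hypothetical caller of proc_is_available(), shown only to illustrate how the function above might be used; the helper name and the cpu_id_limit parameter are assumptions and are not part of the phoenix project:

/* Collect the ids of CPUs the current process may run on.
 * cpu_id_limit is an assumed upper bound on ids to probe, e.g.
 * sysconf(_SC_NPROCESSORS_CONF) on Linux or _SC_CPUID_MAX + 1 on Solaris. */
int collect_available_cpus(int *ids, int max_ids, int cpu_id_limit)
{
    int id, n = 0;

    for (id = 0; id < cpu_id_limit && n < max_ids; id++) {
        if (proc_is_available(id))
            ids[n++] = id;
    }
    return n;   /* number of ids stored in ids[] */
}
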
Code example #7
File: acquire.c  Project: AlfredArouna/illumos-gate
static int
acquire_cpus(struct snapshot *ss, kstat_ctl_t *kc)
{
	size_t i;

	ss->s_nr_cpus = sysconf(_SC_CPUID_MAX) + 1;
	ss->s_cpus = calloc(ss->s_nr_cpus, sizeof (struct cpu_snapshot));
	if (ss->s_cpus == NULL)
		goto out;

	for (i = 0; i < ss->s_nr_cpus; i++) {
		kstat_t *ksp;

		ss->s_cpus[i].cs_id = ID_NO_CPU;
		ss->s_cpus[i].cs_state = p_online(i, P_STATUS);
		/* If no valid CPU is present, move on to the next one */
		if (ss->s_cpus[i].cs_state == -1)
			continue;
		ss->s_cpus[i].cs_id = i;

		if ((ksp = kstat_lookup_read(kc, "cpu_info", i, NULL)) == NULL)
			goto out;

		(void) pset_assign(PS_QUERY, i, &ss->s_cpus[i].cs_pset_id);
		if (ss->s_cpus[i].cs_pset_id == PS_NONE)
			ss->s_cpus[i].cs_pset_id = ID_NO_PSET;

		if (!CPU_ACTIVE(&ss->s_cpus[i]))
			continue;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "vm")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_vm))
			goto out;

		if ((ksp = kstat_lookup_read(kc, "cpu", i, "sys")) == NULL)
			goto out;

		if (kstat_copy(ksp, &ss->s_cpus[i].cs_sys))
			goto out;
	}

	errno = 0;
out:
	return (errno);
}
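acquire_cpus() relies on a kstat_lookup_read() helper that is not shown in this excerpt. A plausible sketch of its shape (an assumption, not the verbatim illumos source): look the kstat up, read its data, and return NULL on either failure.

static kstat_t *
kstat_lookup_read(kstat_ctl_t *kc, char *module, int instance, char *name)
{
	kstat_t *ksp = kstat_lookup(kc, module, instance, name);

	if (ksp == NULL)
		return (NULL);
	if (kstat_read(kc, ksp, NULL) == -1)
		return (NULL);
	return (ksp);
}
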
Code example #8
File: cda_cpu.c  Project: andreiw/polaris
static void
cda_cpu_offline(fmd_hdl_t *hdl, uint_t cpuid, int cpustate)
{
	int i;

	for (i = 0; i < cda.cda_cpu_tries;
	    i++, (void) nanosleep(&cda.cda_cpu_delay, NULL)) {
		if (p_online(cpuid, cpustate) != -1) {
			fmd_hdl_debug(hdl, "offlined cpu %u\n", cpuid);
			cda_stats.dp_offs.fmds_value.ui64++;
			return;
		}
	}

	fmd_hdl_debug(hdl, "failed to offline %u: %s\n", cpuid,
	    strerror(errno));
	cda_stats.dp_fails.fmds_value.ui64++;
}
Code example #9
File: sched.c  Project: maosi66/illumos-joyent
/* ARGSUSED */
long
lx_sched_getaffinity(uintptr_t pid, uintptr_t len, uintptr_t maskp)
{
	int	sz;
	ulong_t	*lmask, *zmask;
	int	i;

	sz = syscall(SYS_brand, B_GET_AFFINITY_MASK, pid, len, maskp);
	if (sz == -1)
		return (-errno);

	/*
	 * If the target LWP hasn't ever had an affinity mask set, the kernel
	 * will return a mask of all 0's. If that is the case we must build a
	 * default mask that has all valid bits turned on.
	 */
	lmask = SAFE_ALLOCA(sz);
	zmask = SAFE_ALLOCA(sz);
	if (lmask == NULL || zmask == NULL)
		return (-ENOMEM);

	bzero(zmask, sz);

	if (uucopy((void *)maskp, lmask, sz) != 0)
		return (-EFAULT);

	if (bcmp(lmask, zmask, sz) != 0)
		return (sz);

	for (i = 0; i < sz * 8; i++) {
		if (p_online(i, P_STATUS) != -1) {
			lmask[BITINDEX(i)] |= BITSHIFT(i);
		}
	}

	if (uucopy(lmask, (void *)maskp, sz) != 0)
		return (-EFAULT);

	return (sz);
}
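The BITINDEX() and BITSHIFT() macros used above (and in code example #16 below) come from the lx brand sources and are not shown here. A plausible definition, an assumption rather than the verbatim illumos code, mapping a CPU id to a word index and a bit mask within the ulong_t mask array:

/* Assumed definitions; the real ones live elsewhere in the lx brand code. */
#define	BITINDEX(ind)	((ind) / (sizeof (ulong_t) * 8))	/* word in mask */
#define	BITSHIFT(ind)	(1UL << ((ind) % (sizeof (ulong_t) * 8)))	/* bit in word */
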
Code example #10
  int readCpuCounters(SFLHost_cpu_counters *cpu) {
    int gotData = NO;

    kstat_ctl_t *kc;
    kstat_t *ksp = NULL;
    kstat_named_t *knp;

    kc = kstat_open();
    if (NULL == kc) {
      myLog(LOG_ERR, "readCpuCounters kstat_open() failed");
    } else {
      ksp = kstat_lookup(kc, "unix", 0, "system_misc");
      if (NULL == ksp) {
	myLog(LOG_ERR, "kstat_loockup error (unix:*:system_misc:*)");
      }
    }

    if (NULL != ksp) {
      if (-1 == kstat_read(kc, ksp, NULL)) {
	myLog(LOG_ERR, "kstat_read error (module: %s, name: %s, class: %s)",
	      ksp->ks_module, ksp->ks_name, ksp->ks_class);
      } else {
	// load_one
	knp = kstat_data_lookup(ksp, "avenrun_1min");
	cpu->load_one = (float)knp->value.ui32;

	// load_five
	knp = kstat_data_lookup(ksp, "avenrun_5min");
	cpu->load_five = (float)knp->value.ui32;

	// load_fifteen
	knp = kstat_data_lookup(ksp, "avenrun_15min");
	cpu->load_fifteen = (float)knp->value.ui32;

	// proc_total
	knp = kstat_data_lookup(ksp, "nproc");
	cpu->proc_total = knp->value.ui32;

	// cpu_num
	knp = kstat_data_lookup(ksp, "ncpus");
	cpu->cpu_num = knp->value.ui32;

	// uptime
	knp = kstat_data_lookup(ksp, "boot_time");
	time_t boot = knp->value.ui32;
	time_t uptime = time(NULL) - boot;
	cpu->uptime = uptime;

	gotData = YES;
      }
    }

    ksp = kstat_lookup(kc, "cpu_info", -1, NULL);
    if (NULL == ksp) {
      myLog(LOG_ERR, "kstat_loockup error (cpu_info:*:cpu_info0:*)");
    }

    if (NULL != ksp) {
      if (-1 == kstat_read(kc, ksp, NULL)) {
	myLog(LOG_ERR, "kstat_read error (module: %s, name: %s, class: %s)",
	      ksp->ks_module, ksp->ks_name, ksp->ks_class);
      } else {

	// cpu_speed
	knp = kstat_data_lookup(ksp, "clock_MHz");
	cpu->cpu_speed = (uint32_t)knp->value.i32;

	gotData = YES;
      }
    }

    // running processes
    int running = runningProcesses();
    if(running > 0) {
      cpu->proc_run = running;
      gotData = YES;
    }

    // From Ganglia's libmetrics
#define CPUSTATES	5
#define CPUSTATE_IDLE	0
#define CPUSTATE_USER	1
#define CPUSTATE_KERNEL	2
#define CPUSTATE_IOWAIT	3
#define CPUSTATE_SWAP	4

    cpu_stat_t cpu_stat;
    int cpu_id = sysconf(_SC_NPROCESSORS_ONLN);
    uint64_t cpu_info[CPUSTATES] = { 0 };
    long stathz = sysconf(_SC_CLK_TCK);
    uint64_t interrupts = 0;
    uint64_t contexts = 0;
#ifndef KSNAME_BUFFER_SIZE
#define KSNAME_BUFFER_SIZE 32
#endif

#define STATHZ_TO_MS(t) (((t) * 1000) / stathz)

    char ks_name[KSNAME_BUFFER_SIZE];
    int i, n;
    for (i = 0; cpu_id > 0; i++) {
      n = p_online(i, P_STATUS);
      if (1 == n || (-1 == n && EINVAL == errno)) {
	continue;
      }

      snprintf(ks_name, KSNAME_BUFFER_SIZE, "cpu_stat%d", i);
      cpu_id--;

      ksp = kstat_lookup(kc, "cpu_stat", i, ks_name);
      if (NULL == ksp) {
	myLog(LOG_ERR, "kstat_lookup error (module: cpu_stat, inst: %d, name %s)", i, ks_name);
	continue;
      }

      if (-1 == kstat_read(kc, ksp, &cpu_stat)) {
	myLog(LOG_ERR, "kstat_read error (module: %s, name: %s, class: %s)",
	      ksp->ks_module, ksp->ks_name, ksp->ks_class);
	continue;
      }

      if(debug>1) {
	myLog(LOG_INFO, "adding cpu stats for cpu=%d (idle=%u user=%u wait=%u swap=%u kernel=%u)",
	      cpu_id,
	      cpu_stat.cpu_sysinfo.cpu[CPU_IDLE],
	      cpu_stat.cpu_sysinfo.cpu[CPU_USER],
	      cpu_stat.cpu_sysinfo.wait[W_IO] + cpu_stat.cpu_sysinfo.wait[W_PIO],
	      cpu_stat.cpu_sysinfo.wait[W_SWAP],
	      cpu_stat.cpu_sysinfo.cpu[CPU_KERNEL]);
      }
	      

      cpu_info[CPUSTATE_IDLE] += cpu_stat.cpu_sysinfo.cpu[CPU_IDLE];
      cpu_info[CPUSTATE_USER] += cpu_stat.cpu_sysinfo.cpu[CPU_USER];
      cpu_info[CPUSTATE_IOWAIT] += cpu_stat.cpu_sysinfo.wait[W_IO] + cpu_stat.cpu_sysinfo.wait[W_PIO];
      cpu_info[CPUSTATE_SWAP] += cpu_stat.cpu_sysinfo.wait[W_SWAP];
      cpu_info[CPUSTATE_KERNEL] += cpu_stat.cpu_sysinfo.cpu[CPU_KERNEL];
      interrupts += cpu_stat.cpu_sysinfo.intr + cpu_stat.cpu_sysinfo.trap;
      contexts += cpu_stat.cpu_sysinfo.pswitch;
      gotData = YES;
    }

    // cpu_user
    cpu->cpu_user = (uint32_t)STATHZ_TO_MS(cpu_info[CPUSTATE_USER]);

    // cpu_nice
    SFL_UNDEF_COUNTER(cpu->cpu_nice);

    // cpu_system
    cpu->cpu_system = (uint32_t)STATHZ_TO_MS(cpu_info[CPUSTATE_KERNEL]);

    // cpu_idle
    cpu->cpu_idle = (uint32_t)STATHZ_TO_MS(cpu_info[CPUSTATE_IDLE]);

    // cpu_wio
    cpu->cpu_wio = (uint32_t)STATHZ_TO_MS(cpu_info[CPUSTATE_IOWAIT]);

    // cpu_intr
    SFL_UNDEF_COUNTER(cpu->cpu_intr);

    // cpu_sintr
    SFL_UNDEF_COUNTER(cpu->cpu_sintr);
	
    // interrupts
    cpu->interrupts = interrupts;

    // contexts
    cpu->contexts = contexts;

    if(kc) kstat_close(kc);
    return gotData;
  }
Code example #11
RTDECL(bool) RTMpIsCpuPresent(RTCPUID idCpu)
{
    int iStatus = p_online(idCpu, P_STATUS);
    return iStatus != -1;
}
Code example #12
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    int iStatus = p_online(idCpu, P_STATUS);
    return iStatus == P_ONLINE
        || iStatus == P_NOINTR;
}
Code example #13
File: ports.c  Project: Eddy805/HandBrake
/************************************************************************
 * hb_get_cpu_count()
 ************************************************************************
 * Whenever possible, returns the number of CPUs on the current
 * computer. Returns 1 otherwise.
 * The detection is actually only performed on the first call.
 ************************************************************************/
int hb_get_cpu_count()
{
    static int cpu_count = 0;

    if( cpu_count )
    {
        return cpu_count;
    }
    cpu_count = 1;

#if defined(SYS_CYGWIN) || defined(SYS_MINGW)
    SYSTEM_INFO cpuinfo;
    GetSystemInfo( &cpuinfo );
    cpu_count = cpuinfo.dwNumberOfProcessors;

#elif defined(SYS_LINUX)
    unsigned int bit;
    cpu_set_t p_aff;
    memset( &p_aff, 0, sizeof(p_aff) );
    sched_getaffinity( 0, sizeof(p_aff), &p_aff );
    for( cpu_count = 0, bit = 0; bit < sizeof(p_aff); bit++ )
         cpu_count += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;

#elif defined(SYS_BEOS)
    system_info info;
    get_system_info( &info );
    cpu_count = info.cpu_count;

#elif defined(SYS_DARWIN) || defined(SYS_FREEBSD) || defined(SYS_OPENBSD)
    size_t length = sizeof( cpu_count );
#ifdef SYS_OPENBSD
    int mib[2] = { CTL_HW, HW_NCPU };
    if( sysctl(mib, 2, &cpu_count, &length, NULL, 0) )
#else
    if( sysctlbyname("hw.ncpu", &cpu_count, &length, NULL, 0) )
#endif
    {
        cpu_count = 1;
    }

#elif defined( SYS_SunOS )
    {
        processorid_t cpumax;
        int i,j=0;

        cpumax = sysconf(_SC_CPUID_MAX);

        for(i = 0; i <= cpumax; i++ )
        {
            if(p_online(i, P_STATUS) != -1)
            {
                j++;
            }
        }
        cpu_count=j;
    }
#endif

    cpu_count = MAX( 1, cpu_count );
    cpu_count = MIN( cpu_count, 64 );

    return cpu_count;
}
Code example #14
File: psradm.c  Project: tsoome/illumos-gate
int
main(int argc, char *argv[])
{
	int	c;
	int	action = 0;
	processorid_t	cpu;
	processorid_t	cpuid_max;
	char	*errptr;
	int	errors;
	psr_action_t	*pac;
	bool disable_smt = 0;

	cmdname = basename(argv[0]);

	while ((c = getopt(argc, argv, "afFinsSv")) != EOF) {
		switch (c) {

		case 'a':		/* applies to all possible CPUs */
			all_flag = 1;
			break;

		case 'F':
			force = 1;
			break;

		case 'S':
			disable_smt = 1;
			break;

		case 'f':
		case 'i':
		case 'n':
		case 's':
			if (action != 0 && action != c) {
				(void) fprintf(stderr,
				    "%s: options -f, -n, -i, and -s are "
				    "mutually exclusive.\n", cmdname);
				usage();
				return (2);
			}
			action = c;
			break;

		case 'v':
			verbose = 1;
			break;

		default:
			usage();
			return (2);
		}
	}

	if (disable_smt) {
		if (!all_flag) {
			fprintf(stderr, "%s: -S must be used with -a.\n",
			    cmdname);
			usage();
			return (2);
		}

		if (force || action != 0 || argc != optind) {
			usage();
			return (2);
		}

		if (p_online(P_ALL_SIBLINGS, P_DISABLED) == -1) {
			fprintf(stderr, "Failed to disable simultaneous "
			    "multi-threading: %s\n", strerror(errno));
			return (EXIT_FAILURE);
		}

		return (EXIT_SUCCESS);
	}

	switch (action) {
	case 'f':
		action = P_OFFLINE;
		break;
	case 'i':
		action = P_NOINTR;
		break;
	case 'n':
		action = P_ONLINE;
		break;
	case 's':
		action = P_SPARE;
		break;
	default:
		if (force != 0) {
			/*
			 * The -F option without other transition options
			 * puts processor(s) into faulted state.
			 */
			action = P_FAULTED;
			break;
		}
		(void) fprintf(stderr,
		    "%s: option -f, -n, -s or -i must "
		    "be specified.\n", cmdname);
		usage();
		return (2);
	}

	pac = psr_action_lookup(action);
	assert(pac != NULL);

	errors = 0;
	if (all_flag) {
		if (argc != optind) {
			usage();
			return (2);
		}
		cpuid_max = (processorid_t)sysconf(_SC_CPUID_MAX);
		for (cpu = 0; cpu <= cpuid_max; cpu++) {
			if (psr_set_state(cpu, action, pac, 0) < 0)
				errors = 1;
		}
	} else {
		argc -= optind;
		if (argc <= 0) {
			usage();	/* not enough arguments */
			return (2);
		}
		for (argv += optind; argc > 0; argv++, argc--) {
			if (strchr(*argv, '-') == NULL) {
				/* individual processor id */
				cpu = (processorid_t)
				    strtol(*argv, &errptr, 10);
				if (errptr != NULL && *errptr != '\0') {
					(void) fprintf(stderr,
					    "%s: invalid processor"
					    " ID %s\n", cmdname, *argv);
					errors = 2;
					continue;
				}
				if (psr_set_state(cpu, action, pac, 1) < 0)
					errors = 1;
			} else {
				/* range of processors */
				processorid_t first, last;

				first = (processorid_t)
				    strtol(*argv, &errptr, 10);
				if (*errptr++ != '-') {
					(void) fprintf(stderr,
					    "%s: invalid processor"
					    " range %s\n", cmdname, *argv);
					errors = 2;
					continue;
				}
				last = (processorid_t)
				    strtol(errptr, &errptr, 10);
				if ((errptr != NULL && *errptr != '\0') ||
				    last < first || first < 0) {
					(void) fprintf(stderr,
					    "%s: invalid processor"
					    " range %s\n", cmdname, *argv);
					errors = 2;
					continue;
				}
				if (do_range(first, last, action, pac))
					errors = 1;
			}
		}
	}
	if (log_open) {
		closelog();
	}
	return (errors);
}
Code example #15
File: psradm.c  Project: tsoome/illumos-gate
static int
psr_set_state(processorid_t cpu, int action, psr_action_t *pac, int mustexist)
{
	int	old_state;
	int	err;
	time_t	now;
	char	buf[80];

	old_state = p_online(cpu, P_STATUS);
	if (old_state < 0) {
		if (errno == EINVAL && !mustexist)
			return (0);	/* no such processor */
		err = errno;		/* in case sprintf smashes errno */
		(void) snprintf(buf, sizeof (buf), "%s: processor %d",
		    cmdname, cpu);
		errno = err;
		perror(buf);
		return (-1);
	}

	if (old_state == P_FAULTED && action != P_FAULTED && !force) {
		(void) printf("%s: processor %d in faulted state; "
		    "add -F option to force change\n", cmdname, cpu);
		return (-1);
	}

	old_state = p_online(cpu, force ? action | P_FORCED : action);
	if (old_state < 0) {
		if (errno == EINVAL && !mustexist)
			return (0);	/* no such processor */
		err = errno;
		(void) snprintf(buf, sizeof (buf), "%s: processor %d",
		    cmdname, cpu);
		errno = err;
		perror(buf);
		return (-1);
	}
	if (old_state == action) {
		if (verbose)
			(void) printf("processor %d already %s.\n", cpu,
			    pac->p_state);
		return (1);		/* no change */
	}

	(void) snprintf(buf, sizeof (buf), "processor %d %s %s.",
	    cpu, pac->p_action, pac->p_state);

	if (verbose)
		(void) printf("%s\n", buf);

	/*
	 * Log the change.
	 */
	if (!log_open) {
		log_open = 1;
		openlog(cmdname, LOG_CONS, LOG_USER);	/* open syslog */
		(void) setlogmask(LOG_UPTO(LOG_INFO));

		ut.ut_pid = getpid();
		ut.ut_type = USER_PROCESS;
		(void) strncpy(ut.ut_user, "psradm", sizeof (ut.ut_user) - 1);
	}

	syslog(LOG_INFO, "%s", buf);

	/*
	 * Update wtmp.
	 */
	(void) snprintf(ut.ut_line, sizeof (ut.ut_line), PSRADM_MSG,
	    cpu, pac->p_wtmp);
	(void) time(&now);
	ut.ut_xtime = now;
	updwtmpx(WTMPX_FILE, &ut);

	return (1);	/* the processor exists and no errors occurred */
}
Code example #16
File: sched.c  Project: maosi66/illumos-joyent
/* ARGSUSED */
long
lx_sched_setaffinity(uintptr_t pid, uintptr_t len, uintptr_t maskp)
{
	int		ret;
	int		sz;
	int		i;
	int		found;
	ulong_t		*lmask;
	pid_t		s_pid;
	lwpid_t		s_tid;
	processorid_t	cpuid = 0;

	if ((pid_t)pid < 0)
		return (-EINVAL);

	if (lx_lpid_to_spair(pid, &s_pid, &s_tid) < 0)
		return (-ESRCH);

	/*
	 * We only support setting affinity masks for threads in
	 * the calling process.
	 */
	if (s_pid != getpid())
		return (-EPERM);

	/*
	 * First, get the minimum bitmask size from the kernel.
	 */
	sz = syscall(SYS_brand, B_GET_AFFINITY_MASK, 0, 0, 0);
	if (sz == -1)
		return (-errno);

	lmask = SAFE_ALLOCA(sz);
	if (lmask == NULL)
		return (-ENOMEM);

	if (uucopy((void *)maskp, lmask, sz) != 0)
		return (-EFAULT);

	/*
	 * Make sure the mask contains at least one processor that is
	 * physically on the system. Reduce the user's mask to the set of
	 * physically present CPUs. Keep track of how many valid
	 * bits are set in the user's mask.
	 */

	for (found = 0, i = 0; i < sz * 8; i++) {
		if (p_online(i, P_STATUS) == -1) {
			/*
			 * This CPU doesn't exist, so clear this bit from
			 * the user's mask.
			 */
			lmask[BITINDEX(i)] &= ~BITSHIFT(i);
			continue;
		}

		if ((lmask[BITINDEX(i)] & BITSHIFT(i)) == BITSHIFT(i)) {
			found++;
			cpuid = i;
		}
	}

	if (found == 0) {
		lx_debug("\tlx_sched_setaffinity: mask has no present CPUs\n");
		return (-EINVAL);
	}

	/*
	 * If only one bit is set, bind the thread to that processor;
	 * otherwise, clear the binding.
	 */
	if (found == 1) {
		lx_debug("\tlx_sched_setaffinity: binding thread %d to cpu%d\n",
		    s_tid, cpuid);
		if (processor_bind(P_LWPID, s_tid, cpuid, NULL) != 0)
			/*
			 * It could be that the requested processor is offline,
			 * so we'll just abandon our good-natured attempt to
			 * bind to it.
			 */
			lx_debug("couldn't bind LWP %d to cpu %d: %s\n", s_tid,
			    cpuid, strerror(errno));
	} else {
		lx_debug("\tlx_sched_setaffinity: clearing thr %d binding\n",
		    s_tid);
		if (processor_bind(P_LWPID, s_tid, PBIND_NONE, NULL) != 0) {
			lx_debug("couldn't clear CPU binding for LWP %d: %s\n",
			    s_tid, strerror(errno));
		}
	}

	/*
	 * Finally, ask the kernel to make a note of our current (though fairly
	 * meaningless) affinity mask.
	 */
	ret = syscall(SYS_brand, B_SET_AFFINITY_MASK, pid, sz, lmask);

	return ((ret == 0) ? 0 : -errno);
}
Code example #17
int main(int argc, char** argv)
{
  if(argc != 4){
    fprintf(stderr, "usage: uncontended_locks <num procs> <num locks> <acquires>\n");
    exit(0);
  }

#ifdef DEBUG
  printf("starting uncontended locks microbenchmark\n");
#endif

  thread_t* threads;  
  
  // parse num procs, stride and iterations
  numProcs = atoi(argv[1]);
  numLocks = atoi(argv[2]);
  acquires = atoi(argv[3]);

  int maxNumProcs = sysconf(_SC_NPROCESSORS_ONLN);
  processorIds = new int[maxNumProcs];
  
  assert(numProcs <= maxNumProcs);
  
  // Initialize thread to processor bindings
  int procId = 0;
  for(int i=0; i<numProcs; ++i){
    int cont = 1;
    for(; cont; ++procId){
      int status = p_online(procId, P_STATUS);
      if (status == P_ONLINE){
	cont = 0;
	processorIds[i] = procId; 
      }
    }
  }

  // Initialize array of thread structures
  threads = (thread_t *) malloc(sizeof(thread_t) * numProcs);
  assert(threads != NULL);
    
  // Initialize random lock access order array
  access_order = new int*[numProcs];
  for(int i=0; i<numProcs; ++i){
    access_order[i] = new int[acquires];
    for(int j=0; j<acquires; ++j){
      int tmp = rand();
      access_order[i][j] = tmp % numLocks;
    }
  }

  // Initialize Lock and Counter arrays
  lock_array = new Lock[numLocks];
  counter_array = new Counter[numLocks];
  
  for(int i=0; i<numLocks; ++i){
    lock_array[i].lock_var = 0;
    counter_array[i].count_var = 0;
  }

#ifdef DEBUG
  printf("About to create threads\n");
#endif

  // create the threads
  int ret;
  for(int dx=0; dx < numProcs-1; dx++) {
    ret = thr_create(NULL, 0, thread_loop, (void *) dx, THR_BOUND,
		     &threads[dx]);    
    assert(ret == 0); 
  }
  
  // magic break to clear ruby stats
  RUBY_MAGIC_CALL(Do_Breakpoint);
  thread_loop((void*) (numProcs-1));

  // Wait for each of the threads to terminate 
  for(int dx=0; dx < numProcs-1; dx++) {    
    ret = thr_join(threads[dx], NULL, NULL);
    assert(ret == 0);  
  }  


}
Code example #18
File: cpustat.c  Project: AlainODea/illumos-gate
static int
cpustat(void)
{
	cpc_setgrp_t	*accum;
	cpc_set_t	*start;
	int		c, i, retval;
	int		lwps = 0;
	psetid_t	mypset, cpupset;
	char		*errstr;
	cpc_buf_t	**data1, **data2, **scratch;
	int		nreqs;
	kstat_ctl_t	*kc;

	ncpus = (int)sysconf(_SC_NPROCESSORS_CONF);
	if ((gstate = calloc(ncpus, sizeof (*gstate))) == NULL) {
		(void) fprintf(stderr, gettext(
		    "%s: out of heap\n"), opts->pgmname);
		return (1);
	}

	max_chip_id = sysconf(_SC_CPUID_MAX);
	if ((chip_designees = malloc(max_chip_id * sizeof (int))) == NULL) {
		(void) fprintf(stderr, gettext(
		    "%s: out of heap\n"), opts->pgmname);
		return (1);
	}
	for (i = 0; i < max_chip_id; i++)
		chip_designees[i] = -1;

	if (smt) {
		if ((kc = kstat_open()) == NULL) {
			(void) fprintf(stderr, gettext(
			    "%s: kstat_open() failed: %s\n"), opts->pgmname,
			    strerror(errno));
			return (1);
		}
	}

	if (opts->dosoaker)
		if (priocntl(0, 0, PC_GETCID, &fxinfo) == -1) {
			(void) fprintf(stderr, gettext(
			    "%s: couldn't get FX scheduler class: %s\n"),
			    opts->pgmname, strerror(errno));
			return (1);
		}

	/*
	 * Only include processors that are participating in the system
	 */
	for (c = 0, i = 0; i < ncpus; c++) {
		switch (p_online(c, P_STATUS)) {
		case P_ONLINE:
		case P_NOINTR:
			if (smt) {

				gstate[i].chip_id = get_chipid(kc, c);
				if (gstate[i].chip_id != -1 &&
				    chip_designees[gstate[i].chip_id] == -1)
					chip_designees[gstate[i].chip_id] = c;
			}

			gstate[i++].cpuid = c;
			break;
		case P_OFFLINE:
		case P_POWEROFF:
		case P_FAULTED:
		case P_SPARE:
			gstate[i++].cpuid = -1;
			break;
		default:
			gstate[i++].cpuid = -1;
			(void) fprintf(stderr,
			    gettext("%s: cpu%d in unknown state\n"),
			    opts->pgmname, c);
			break;
		case -1:
			break;
		}
	}

	/*
	 * Examine the processor sets; if we're in one, only attempt
	 * to report on the set we're in.
	 */
	if (pset_bind(PS_QUERY, P_PID, P_MYID, &mypset) == -1) {
		errstr = strerror(errno);
		(void) fprintf(stderr, gettext("%s: pset_bind - %s\n"),
		    opts->pgmname, errstr);
	} else {
		for (i = 0; i < ncpus; i++) {
			struct tstate *this = &gstate[i];

			if (this->cpuid == -1)
				continue;

			if (pset_assign(PS_QUERY,
			    this->cpuid, &cpupset) == -1) {
				errstr = strerror(errno);
				(void) fprintf(stderr,
				    gettext("%s: pset_assign - %s\n"),
				    opts->pgmname, errstr);
				continue;
			}

			if (mypset != cpupset)
				this->cpuid = -1;
		}
	}

	if (opts->dotitle)
		print_title(opts->master);
	zerotime();

	for (i = 0; i < ncpus; i++) {
		struct tstate *this = &gstate[i];

		if (this->cpuid == -1)
			continue;
		this->sgrp = cpc_setgrp_clone(opts->master);
		if (this->sgrp == NULL) {
			this->cpuid = -1;
			continue;
		}
		if (thr_create(NULL, 0, gtick, this,
		    THR_BOUND|THR_NEW_LWP, &this->tid) == 0)
			lwps++;
		else {
			(void) fprintf(stderr,
			    gettext("%s: cannot create thread for cpu%d\n"),
			    opts->pgmname, this->cpuid);
			this->status = 4;
		}
	}

	if (lwps != 0)
		for (i = 0; i < ncpus; i++)
			(void) thr_join(gstate[i].tid, NULL, NULL);

	if ((accum = cpc_setgrp_clone(opts->master)) == NULL) {
		(void) fprintf(stderr, gettext("%s: out of heap\n"),
		    opts->pgmname);
		return (1);
	}

	retval = 0;
	for (i = 0; i < ncpus; i++) {
		struct tstate *this = &gstate[i];

		if (this->cpuid == -1)
			continue;
		cpc_setgrp_accum(accum, this->sgrp);
		cpc_setgrp_free(this->sgrp);
		this->sgrp = NULL;
		if (this->status != 0)
			retval = 1;
	}

	cpc_setgrp_reset(accum);
	start = cpc_setgrp_getset(accum);
	do {
		nreqs = cpc_setgrp_getbufs(accum, &data1, &data2, &scratch);
		print_total(lwps, *data1, nreqs, cpc_setgrp_getname(accum));
	} while (cpc_setgrp_nextset(accum) != start);

	cpc_setgrp_free(accum);
	accum = NULL;

	free(gstate);
	return (retval);
}
Code example #19
/** schedule_tasks()
 *  thread_func - function pointer to process splitter data
 *  splitter_func - splitter function pointer
 *  splitter_init - splitter_init function pointer
 *  runs map tasks in a new thread on each of the available processors.
 *  returns a pointer to the intermediate value array
 */
static inline void schedule_tasks(thread_wrapper_arg_t *th_arg)
{
   assert(th_arg);

   pthread_attr_t attr;   // parameter for pthread creation
   thread_wrapper_arg_t * curr_th_arg; // arg for thread_wrapper()
   
   int thread_cnt;        // counter of the number of threads assigned
   int curr_proc;
   int curr_thread;

   int num_threads = getNumTaskThreads(th_arg->func_type);
   int threads_per_proc = num_threads / g_state.num_procs; 
   int threads_mod_procs = num_threads % g_state.num_procs;

   int pos = 0; // position of next result in the array
   pthread_mutex_t splitter_lock; // lock for splitter function

   g_state.tinfo = (thread_info_t *)CALLOC(num_threads, sizeof(thread_info_t));
   CHECK_ERROR(pthread_mutex_init(&splitter_lock, NULL) != 0);   
   
   dprintf("Number of available processors = %d\n", g_state.num_procs);
   dprintf("Number of Threads to schedule = %d per(%d) mod(%d)\n", 
      num_threads, threads_per_proc, threads_mod_procs);

   th_arg->pos = &pos;
   th_arg->splitter_lock = &splitter_lock;
   
   // thread must be scheduled systemwide
   pthread_attr_init(&attr);
   pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);

#ifdef _LINUX_
   unsigned long cpu_set; // bit array of available processors
   // Create a thread for each available processor to handle the split data
   CHECK_ERROR(sched_getaffinity(0, sizeof(cpu_set), &cpu_set) == -1);
   for (thread_cnt = curr_proc = 0; 
        curr_proc < sizeof(cpu_set) && thread_cnt < num_threads; 
        curr_proc++)
   {
      if (isCpuAvailable(cpu_set, curr_proc))
      {
#endif
#ifdef _SOLARIS_
   int max_procs = sysconf(_SC_NPROCESSORS_ONLN);
   for (thread_cnt = curr_proc = 0; thread_cnt < num_threads; curr_proc++)
   {
      if (P_ONLINE == p_online(curr_proc, P_STATUS))
      {
#endif
         
         for (curr_thread = !(threads_mod_procs-- > 0); 
              curr_thread <= threads_per_proc && thread_cnt < num_threads; 
              curr_thread++, thread_cnt++)
         {
            // Setup data to be passed to each thread
            curr_th_arg = (thread_wrapper_arg_t*)MALLOC(sizeof(thread_wrapper_arg_t));
            memcpy(curr_th_arg, th_arg, sizeof(thread_wrapper_arg_t));
            curr_th_arg->cpu_id = curr_proc;

            g_state.tinfo[thread_cnt].cpuid = curr_proc;

            //fprintf(stderr, "Starting thread %d on cpu %d\n", thread_cnt, curr_th_arg->cpu_id);
            switch (th_arg->func_type)
            {
            case MAP:
               CHECK_ERROR(pthread_create(&g_state.tinfo[thread_cnt].tid, &attr, 
                                                map_worker, curr_th_arg) != 0);
               break;
            case REDUCE:
               CHECK_ERROR(pthread_create(&g_state.tinfo[thread_cnt].tid, &attr, 
                                                reduce_worker, curr_th_arg) != 0);
               break;
            case MERGE:
               CHECK_ERROR(pthread_create(&g_state.tinfo[thread_cnt].tid, &attr, 
                                                merge_worker, curr_th_arg) != 0);
               break;
            default:
               assert(0);
               break;
            }
         }
      }
      
      /*** ADDED BY RAM TO ASSIGN EACH PTHREAD TO HARDWARE THREADS ON DIFFERENT
      PROCESSORS ON THE ULTRASPARC T1 ****/
      if (getenv("MR_AFARA") != NULL)
      {
         //fprintf(stderr, "Using sparse threads\n");
         curr_proc += 3;
         if (curr_proc >= max_procs-1) {
            curr_proc++;
            curr_proc = curr_proc % max_procs; 
         }
      }
   }

   
   dprintf("Status: All %d threads have been created\n", num_threads);
   
   // barrier, wait for all threads to finish           
   for (thread_cnt = 0; thread_cnt < num_threads; thread_cnt++)
   {
      int ret_val;
      CHECK_ERROR(pthread_join(g_state.tinfo[thread_cnt].tid, (void **)(void *)&ret_val) != 0);
      
      // The thread returned an error. Restart the thread.
      //if (ret_val != 0)
      //{
      //}
   }
   
   pthread_attr_destroy(&attr);
   free(g_state.tinfo);
   dprintf("Status: All tasks have completed\n"); 
   
   return;
}

/** map_worker()
* args - pointer to thread_wrapper_arg_t
* returns 0 on success
* This runs thread_func() until there is no more data from the splitter().
* The pointer to results are stored in return_values array.
*/
static void *map_worker(void *args) 
{
   thread_wrapper_arg_t *th_arg = (thread_wrapper_arg_t *)args;
   int thread_index = getCurrThreadIndex(MAP);
   map_args_t thread_func_arg;
   int num_assigned = 0;
   int ret; // return value of splitter func. 0 = no more data to provide
   int isOneQueuePerTask = g_state.isOneQueuePerTask;

   assert(th_arg);
   
#ifdef _LINUX_
   // Bind thread to run on cpu_id
   unsigned long cpu_set = 0;
   setCpuAvailable(&cpu_set, th_arg->cpu_id);
   CHECK_ERROR(sched_setaffinity(0, sizeof(cpu_set), &cpu_set) != 0);
#endif

#ifdef _SOLARIS_
   dprintf("Binding thread to processor %d\n", th_arg->cpu_id);
   CHECK_ERROR(processor_bind(P_LWPID, P_MYID, th_arg->cpu_id, NULL)!= 0);
   /*if (processor_bind(P_LWPID, P_MYID, th_arg->cpu_id, NULL)!= 0) {
      switch(errno)
      {
         case EFAULT: dprintf("EFAULT\n");
                        break;
         case EINVAL: dprintf("EINVAL\n");
                        break;
         case EPERM:  dprintf("EPERM\n");
                        break;
         case ESRCH:  dprintf("ESRCH\n");
                        break;
         default: dprintf("Errno is %d\n",errno);
         
      }
   }*/
#endif

   while (1)
   {
      pthread_mutex_lock(th_arg->splitter_lock);
            
      ret = g_state.splitter(g_state.args->task_data, g_state.chunk_size, &thread_func_arg);
      if (ret != 0) 
      {
         int alloc_len = g_state.intermediate_task_alloc_len;
         g_state.tinfo[thread_index].curr_task = g_state.map_tasks++;
         num_assigned++;

         if (isOneQueuePerTask && g_state.map_tasks > alloc_len)
         {
            dprintf("MAP TASK QUEUE REALLOC\n");
            int i;

            g_state.intermediate_task_alloc_len *= 2;

            for (i = 0; i < g_state.reduce_tasks; i++)
            {
               g_state.intermediate_vals[i] = (keyvals_arr_t *)REALLOC(
                  g_state.intermediate_vals[i], 
                  g_state.intermediate_task_alloc_len*sizeof(keyvals_arr_t));
               memset(&g_state.intermediate_vals[i][alloc_len], 0, 
                  alloc_len*sizeof(keyvals_arr_t));
            }
         }
      }
      
      pthread_mutex_unlock(th_arg->splitter_lock);

      // Stop if there is no more data
      if (ret == 0) break;
      
      dprintf("Task %d: cpu_id -> %d - Started\n", num_assigned, th_arg->cpu_id);

      g_state.args->map(&thread_func_arg);

      dprintf("Task %d: cpu_id -> %d - Done\n", num_assigned, th_arg->cpu_id);
   }

   dprintf("Status: Total of %d tasks were assigned to cpu_id %d\n", 
      num_assigned, th_arg->cpu_id);

   free(args);
   
   return (void *)0;
}


static void *reduce_worker(void *args)
{
   thread_wrapper_arg_t *th_arg = (thread_wrapper_arg_t *)args;   
   int thread_index = getCurrThreadIndex(REDUCE);
   int isOneQueuePerTask = g_state.isOneQueuePerTask;
   
   assert(th_arg);
   
	#ifdef _LINUX_
	   // Bind thread to run on cpu_id
	   unsigned long cpu_set = 0;
	   setCpuAvailable(&cpu_set, th_arg->cpu_id);
	   CHECK_ERROR(sched_setaffinity(0, sizeof(cpu_set), &cpu_set) != 0);
	#endif

	#ifdef _SOLARIS_
	   CHECK_ERROR(processor_bind(P_LWPID, P_MYID, th_arg->cpu_id, NULL)!= 0);
	   /*if (processor_bind(P_LWPID, P_MYID, th_arg->cpu_id, NULL)!= 0) {
	      switch(errno)
         {
            case EFAULT: dprintf("EFAULT\n");
                           break;
            case EINVAL: dprintf("EINVAL\n");
                           break;
            case EPERM:  dprintf("EPERM\n");
                           break;
            case ESRCH:  dprintf("ESRCH\n");
                           break;
            default: dprintf("Errno is %d\n",errno);
            
         }   
	   }*/
	#endif

	int curr_thread, done;
   int curr_reduce_task = 0;
   int ret;
   int num_map_threads;
   if (isOneQueuePerTask)
      num_map_threads = g_state.map_tasks;
   else
      num_map_threads = g_state.num_map_threads;

   int startsize = DEFAULT_VALS_ARR_LEN;
   keyvals_arr_t* thread_array;
   int vals_len, max_len, next_min_pos;
   keyvals_t *curr_key_val, *min_key_val, *next_min;

   int * thread_position = (int *)MALLOC(num_map_threads * sizeof(int));
   void** vals = MALLOC(sizeof(char*)*startsize);

   while (1)
   {
      // Get the next reduce task
	   pthread_mutex_lock(th_arg->splitter_lock);
   	
      ret = (*th_arg->pos >= g_state.reduce_tasks);
      if (!ret)
      {
	      g_state.tinfo[thread_index].curr_task = curr_reduce_task = 
	                                                      (*th_arg->pos)++;
      }
      pthread_mutex_unlock(th_arg->splitter_lock);

      // No more reduce tasks
      if(ret) break;
   	
	   bzero((char *)thread_position, num_map_threads*sizeof(int));

	   vals_len = 0;
		max_len = startsize;
		
      min_key_val = NULL;
		next_min =  NULL;
      done = 0;

      while (!done)
      {
         for (curr_thread = 0; curr_thread < num_map_threads; curr_thread++)
         {
		      /* Find the next array to search */
		      thread_array = 
               &g_state.intermediate_vals[curr_reduce_task][curr_thread];

		      /* Check if the current processor array has been completely searched */
            if (thread_position[curr_thread] >= thread_array->len) continue;
         
		      /* Get the next key in the processor array */
		      curr_key_val = &thread_array->arr[thread_position[curr_thread]];

		      /* If the key matches the minimum value, then add the value to the
               list of values for that key */
            if (min_key_val != NULL && 
                !g_state.args->key_cmp(curr_key_val->key, min_key_val->key))
            {
               if (g_state.reduce == identity_reduce)
               {
                  int j;
                  for (j = 0; j < curr_key_val->len; j++)
                  {
				         emit_inline(min_key_val->key, curr_key_val->vals[j]);
                  }
               }
               else
               {
                  if (vals_len + curr_key_val->len >= max_len)
                  {
                     while (vals_len + curr_key_val->len >= max_len)
				            max_len *= 2;
   				      
                     vals = REALLOC(vals, sizeof(char*)*(max_len));
                  }
				      memcpy(&vals[vals_len], curr_key_val->vals, 
                        curr_key_val->len*sizeof(char*));
				      vals_len += curr_key_val->len;
               }

               thread_position[curr_thread--]++;
            }
		      /* Find the location of the next min */	
            else if (next_min == NULL || 
                     g_state.args->key_cmp(curr_key_val->key, next_min->key) < 0)
            {
               next_min = curr_key_val;
               next_min_pos = curr_thread;
            }
         }

	      if(min_key_val != NULL)
	      {
            if (g_state.reduce != identity_reduce)
            {
		         g_state.reduce(min_key_val->key, vals, vals_len);
               
            }

            vals_len = 0;
	         min_key_val = NULL;
         }

         if (next_min != NULL)
         {
            min_key_val = next_min;
            next_min = NULL;
         }
         
         // See if there are any elements left
         for(curr_thread = 0; curr_thread < num_map_threads && 
             thread_position[curr_thread] >= 
             g_state.intermediate_vals[curr_reduce_task][curr_thread].len; 
             curr_thread++);
	      done = (curr_thread == num_map_threads);
      }
      
      for (curr_thread = 0; curr_thread < num_map_threads; curr_thread++)
      {
         keyvals_arr_t * arr = &g_state.intermediate_vals[curr_reduce_task][curr_thread];
         int j;
         for(j = 0; j < arr->len; j++)
         {
            free(arr->arr[j].vals);
         }
         free(arr->arr);
      }
      free(g_state.intermediate_vals[curr_reduce_task]);
   }

   free(thread_position);
   free(vals);
   free(args);

   return (void *)0;
}