Example #1
/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_alloc(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c, d;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		c = BP_GET_NDVAS(zio->io_bp);

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_PUSHPAGE);
		mm->mm_children = c;
		mm->mm_replacing = B_FALSE;
		mm->mm_preferred = spa_get_random(c);
		mm->mm_root = B_TRUE;

		/*
		 * Check the other, lower-index DVAs to see if they're on
		 * the same vdev as the child we picked.  If they are, use
		 * them since they are likely to have been allocated from
		 * the primary metaslab in use at the time, and hence are
		 * more likely to have locality with single-copy data.
		 */
		for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
			if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
				mm->mm_preferred = d;
		}

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		int lowest_pending = INT_MAX;
		int lowest_nr = 1;

		c = vd->vdev_children;

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_PUSHPAGE);
		mm->mm_children = c;
		mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		mm->mm_preferred = 0;
		mm->mm_root = B_FALSE;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;

			if (mm->mm_replacing)
				continue;

			if (!vdev_readable(mc->mc_vd)) {
				mc->mc_error = SET_ERROR(ENXIO);
				mc->mc_tried = 1;
				mc->mc_skipped = 1;
				mc->mc_pending = INT_MAX;
				continue;
			}

			mc->mc_pending = vdev_mirror_pending(mc->mc_vd);
			if (mc->mc_pending < lowest_pending) {
				lowest_pending = mc->mc_pending;
				lowest_nr = 1;
			} else if (mc->mc_pending == lowest_pending) {
				lowest_nr++;
			}
		}

		d = gethrtime() / (NSEC_PER_USEC * zfs_vdev_mirror_switch_us);
		d = (d % lowest_nr) + 1;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			if (mm->mm_child[c].mc_pending == lowest_pending) {
				if (--d == 0) {
					mm->mm_preferred = c;
					break;
				}
			}
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
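
The interesting gethrtime() use above is the tie-break: among children with the same lowest queue depth, the preferred index rotates once per zfs_vdev_mirror_switch_us microseconds. A stand-alone sketch of that idea, assuming a POSIX monotonic clock in place of the kernel's gethrtime() (hrtime_ns and pick_rotating are illustrative names, not ZFS code):

#include <stdint.h>
#include <time.h>

/* Stand-in for gethrtime(): monotonic nanoseconds (an assumption). */
static uint64_t
hrtime_ns(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

/*
 * Rotate among nr_lowest equally loaded candidates, switching the chosen
 * index once per switch_us microseconds, as the selection loop above does.
 */
static int
pick_rotating(int nr_lowest, uint64_t switch_us)
{
	uint64_t slot = hrtime_ns() / (1000ULL * switch_us);

	return ((int)(slot % (uint64_t)nr_lowest));
}
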
Example #2
double get_seconds(void)
{
  hrtime_t t = gethrtime();

  return ((double)t)/1e9;
}
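
A wrapper like this is normally consumed by differencing two samples around the region of interest; a minimal sketch (do_work() is a hypothetical workload, not part of the original):

#include <stdio.h>

double get_seconds(void);     /* the wrapper above */
extern void do_work(void);    /* hypothetical workload */

void time_region(void)
{
  double t0 = get_seconds();
  do_work();
  double t1 = get_seconds();
  printf("elapsed: %.6f s\n", t1 - t0);
}
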
Example #3
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;
	boolean_t ccw_failure;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				nvl = fnvlist_alloc();

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME)
				pool_name = fnvlist_lookup_string(
				    spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
			else
				pool_name = spa_name(spa);

			fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, NULL, ESC_ZFS_CONFIG_SYNC);
}
Example #4
unsigned long long rdtsc(void)
{
	return (DWORDLONG) ((double)gethrtime() * (double)processor_speed_to_nsecs);
}
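
Note the round trip through double: hrtime_t counts nanoseconds, and a double's 53-bit mantissa starts rounding once the count passes 2^53 ns (roughly 104 days of uptime). If the scale factor can be kept as an integer ratio, the same emulation works in pure integer arithmetic; a sketch under that assumption (ticks_num/ticks_den is an invented ratio standing in for processor_speed_to_nsecs):

/* Assumed ratio: CPU ticks per nanosecond = ticks_num / ticks_den. */
static unsigned long long ticks_num = 3;	/* e.g. a 3 GHz clock */
static unsigned long long ticks_den = 1;

unsigned long long rdtsc_integer(void)
{
	unsigned long long ns = (unsigned long long)gethrtime();

	/* divide first so the product stays within 64 bits */
	return ((ns / ticks_den) * ticks_num);
}
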
Example #5
/**
 * Initialise monotonic seconds counter.
 */
void
MHD_monotonic_sec_counter_init (void)
{
#ifdef HAVE_CLOCK_GET_TIME
  mach_timespec_t cur_time;
#endif /* HAVE_CLOCK_GET_TIME */
  enum _MHD_mono_clock_source mono_clock_source = _MHD_CLOCK_NO_SOURCE;
#ifdef HAVE_CLOCK_GETTIME
  struct timespec ts;

  mono_clock_id = _MHD_UNWANTED_CLOCK;
#endif /* HAVE_CLOCK_GETTIME */
#ifdef HAVE_CLOCK_GET_TIME
  mono_clock_service = _MHD_INVALID_CLOCK_SERV;
#endif /* HAVE_CLOCK_GET_TIME */

  /* just a little syntactic trick to get the
     various following ifdef's to work out nicely */
  if (0)
    {
    }
  else
#ifdef HAVE_CLOCK_GETTIME
#ifdef CLOCK_MONOTONIC_COARSE
  /* Linux-specific fast value-getting clock */
  /* Can be affected by frequency adjustment and doesn't count time in suspend, */
  /* but preferred since it's fast */
  if (0 == clock_gettime (CLOCK_MONOTONIC_COARSE,
                          &ts))
    {
      mono_clock_id = CLOCK_MONOTONIC_COARSE;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_MONOTONIC_COARSE */
#ifdef CLOCK_MONOTONIC_FAST
  /* FreeBSD/DragonFly fast value-getting clock */
  /* Can be affected by frequency adjustment, but preferred since it's fast */
  if (0 == clock_gettime (CLOCK_MONOTONIC_FAST,
                          &ts))
    {
      mono_clock_id = CLOCK_MONOTONIC_FAST;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_MONOTONIC_FAST */
#ifdef CLOCK_MONOTONIC_RAW
  /* Linux-specific clock */
  /* Not affected by frequency adjustment, but doesn't count time in suspend */
  if (0 == clock_gettime (CLOCK_MONOTONIC_RAW,
                          &ts))
    {
      mono_clock_id = CLOCK_MONOTONIC_RAW;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_MONOTONIC_RAW */
#ifdef CLOCK_BOOTTIME
  /* Linux-specific clock */
  /* Counts time in suspend, so it's truly monotonic on Linux, */
  /* but value retrieval can be slower than with other clocks */
  if (0 == clock_gettime (CLOCK_BOOTTIME,
                          &ts))
    {
      mono_clock_id = CLOCK_BOOTTIME;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_BOOTTIME */
#ifdef CLOCK_MONOTONIC
  /* Monotonic clock */
  /* Widely supported, may be affected by frequency adjustment */
  /* On Linux it's not truly monotonic as it doesn't count time in suspend */
  if (0 == clock_gettime (CLOCK_MONOTONIC,
                          &ts))
    {
      mono_clock_id = CLOCK_MONOTONIC;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_MONOTONIC */
#endif /* HAVE_CLOCK_GETTIME */
#ifdef HAVE_CLOCK_GET_TIME
  /* Darwin-specific monotonic clock */
  /* Should be monotonic, as the clock_set_time function always fails */
  /* unconditionally on recent kernels */
  if ( (KERN_SUCCESS == host_get_clock_service (mach_host_self(),
                                                SYSTEM_CLOCK,
                                                &mono_clock_service)) &&
       (KERN_SUCCESS == clock_get_time (mono_clock_service,
                                        &cur_time)) )
    {
      mono_clock_start = cur_time.tv_sec;
      mono_clock_source = _MHD_CLOCK_GET_TIME;
    }
  else
#endif /* HAVE_CLOCK_GET_TIME */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
  /* W32 Vista or later specific monotonic clock */
  /* Available since Vista, ~15ms accuracy */
  if (1)
    {
      tick_start = GetTickCount64 ();
      mono_clock_source = _MHD_CLOCK_GETTICKCOUNT64;
    }
  else
#else  /* _WIN32_WINNT < 0x0600 */
  /* W32 specific monotonic clock */
  /* Available on Windows 2000 and later */
  if (1)
    {
      LARGE_INTEGER freq;
      LARGE_INTEGER perf_counter;

      QueryPerformanceFrequency (&freq); /* never fails on XP and later */
      QueryPerformanceCounter (&perf_counter); /* never fails on XP and later */
      perf_freq = freq.QuadPart;
      perf_start = perf_counter.QuadPart;
      mono_clock_source = _MHD_CLOCK_PERFCOUNTER;
    }
  else
#endif /* _WIN32_WINNT < 0x0600 */
#endif /* _WIN32 */
#ifdef HAVE_CLOCK_GETTIME
#ifdef CLOCK_HIGHRES
  /* Solaris-specific monotonic high-resolution clock */
  /* Not preferred, as it is potentially resource-hungry */
  if (0 == clock_gettime (CLOCK_HIGHRES,
                          &ts))
    {
      mono_clock_id = CLOCK_HIGHRES;
      mono_clock_start = ts.tv_sec;
      mono_clock_source = _MHD_CLOCK_GETTIME;
    }
  else
#endif /* CLOCK_HIGHRES */
#endif /* HAVE_CLOCK_GETTIME */
#ifdef HAVE_GETHRTIME
  /* HP-UX and Solaris monotonic clock */
  /* Not preferred, as it is potentially resource-hungry */
  if (1)
    {
      hrtime_start = gethrtime ();
      mono_clock_source = _MHD_CLOCK_GETHRTIME;
    }
  else
#endif /* HAVE_GETHRTIME */
    {
      /* no suitable clock source was found */
      mono_clock_source = _MHD_CLOCK_NO_SOURCE;
    }

#ifdef HAVE_CLOCK_GET_TIME
  if ( (_MHD_CLOCK_GET_TIME != mono_clock_source) &&
       (_MHD_INVALID_CLOCK_SERV != mono_clock_service) )
    {
      /* clock service was initialised but clock_get_time failed */
      mach_port_deallocate (mach_task_self(),
                            mono_clock_service);
      mono_clock_service = _MHD_INVALID_CLOCK_SERV;
    }
#else
  (void) mono_clock_source; /* avoid compiler warning */
#endif /* HAVE_CLOCK_GET_TIME */

  sys_clock_start = time (NULL);
}
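
The read side is not shown in this listing; it subtracts the recorded start value from a fresh sample of whichever source won the selection above. A simplified sketch covering just two of the branches (an illustration of the pattern, not MHD's actual reader):

time_t
monotonic_sec_counter_sketch (void)
{
#ifdef HAVE_CLOCK_GETTIME
  struct timespec ts;

  if ( (_MHD_UNWANTED_CLOCK != mono_clock_id) &&
       (0 == clock_gettime (mono_clock_id,
                            &ts)) )
    return ts.tv_sec - mono_clock_start;
#endif /* HAVE_CLOCK_GETTIME */
#ifdef HAVE_GETHRTIME
  return (time_t) ((gethrtime () - hrtime_start) / 1000000000);
#else
  return time (NULL) - sys_clock_start; /* wall clock as a last resort */
#endif /* HAVE_GETHRTIME */
}
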
Example #6
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Annotate this process with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	//current->flags |= PF_NOFS;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		hrtime_t hrstart;
		txg_history_t *th;
		uint64_t timer, timeout;
		uint64_t txg;

		timeout = zfs_txg_timeout * hz;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		th = dsl_pool_txg_history_get(dp, txg);
		th->th_kstat.state = TXG_STATE_SYNCING;
		vdev_get_stats(spa->spa_root_vdev, &th->th_vs1);
		dsl_pool_txg_history_put(th);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		hrstart = gethrtime();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);

		/*
		 * Measure the txg sync time and determine the amount of I/O done.
		 */
		th = dsl_pool_txg_history_get(dp, txg);
		vdev_get_stats(spa->spa_root_vdev, &th->th_vs2);
		th->th_kstat.sync_time = gethrtime() - hrstart;
		th->th_kstat.nread = th->th_vs2.vs_bytes[ZIO_TYPE_READ] -
		    th->th_vs1.vs_bytes[ZIO_TYPE_READ];
		th->th_kstat.nwritten = th->th_vs2.vs_bytes[ZIO_TYPE_WRITE] -
		    th->th_vs1.vs_bytes[ZIO_TYPE_WRITE];
		th->th_kstat.reads = th->th_vs2.vs_ops[ZIO_TYPE_READ] -
		    th->th_vs1.vs_ops[ZIO_TYPE_READ];
		th->th_kstat.writes = th->th_vs2.vs_ops[ZIO_TYPE_WRITE] -
		    th->th_vs1.vs_ops[ZIO_TYPE_WRITE];
		th->th_kstat.state = TXG_STATE_COMMITTED;
		dsl_pool_txg_history_put(th);
	}
}
Example #7
File: mmp.c Project: LLNL/zfs
static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t last_spa_suspended = spa_suspended(spa);
	boolean_t last_spa_multihost = spa_multihost(spa);
	callb_cpr_t cpr;
	hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
	    MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));

	mmp_thread_enter(mmp, &cpr);

	/*
	 * The mmp_write_done() function calculates mmp_delay based on the
	 * prior value of mmp_delay and the elapsed time since the last write.
	 * For the first mmp write, there is no "last write", so we start
	 * with fake, but reasonable, default non-zero values.
	 */
	mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
	    MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
	mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;

	while (!mmp->mmp_thread_exiting) {
		uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
		uint64_t mmp_interval = MSEC2NSEC(
		    MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
		boolean_t suspended = spa_suspended(spa);
		boolean_t multihost = spa_multihost(spa);
		hrtime_t start, next_time;

		start = gethrtime();
		if (multihost) {
			next_time = start + mmp_interval /
			    MAX(vdev_count_leaves(spa), 1);
		} else {
			next_time = start + MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		}

		/*
		 * When MMP goes off => on, or spa goes suspended =>
		 * !suspended, we know no writes occurred recently.  We
		 * update mmp_last_write to give us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mutex_exit(&mmp->mmp_io_lock);
		} else if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}
		last_spa_multihost = multihost;
		last_spa_suspended = suspended;

		/*
		 * Smooth max_fail_ns when its factors are decreased, because
		 * making (max_fail_ns < mmp_interval) results in the pool being
		 * immediately suspended before writes can occur at the new
		 * higher frequency.
		 */
		if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
			max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
			    mmp_fail_intervals)) / 32;
		} else {
			max_fail_ns = mmp_interval * mmp_fail_intervals;
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (!suspended && mmp_fail_intervals && multihost &&
		    (start - mmp->mmp_last_write) > max_fail_ns) {
			zio_suspend(spa, NULL);
		}

		if (multihost)
			mmp_write_uberblock(spa);

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, ddi_get_lbolt() +
		    ((next_time - gethrtime()) / (NANOSEC / hz)));
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}
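
One detail worth isolating is the sleep calculation: the thread converts an absolute gethrtime() deadline into the absolute lbolt tick value that cv_timedwait_sig() expects. Pulled out as a helper, the conversion in the loop above is just (a sketch of the expression, not a ZFS function):

/*
 * Convert an absolute gethrtime() deadline (nanoseconds) into an absolute
 * lbolt deadline (clock ticks).  NANOSEC / hz is nanoseconds per tick; a
 * deadline already in the past yields a value at or before "now".
 */
static clock_t
hrdeadline_to_lbolt(hrtime_t deadline)
{
	return (ddi_get_lbolt() + ((deadline - gethrtime()) / (NANOSEC / hz)));
}
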
Example #8
/* local or global wall-clock time in seconds */
double elg_pform_wtime(void) {
  return (double) gethrtime() * 1.0e-9;
}
Example #9
File: txg.c Project: awesome/zfs
static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = (dsl_pool_t *)arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	vdev_stat_t *vs1, *vs2;
	uint64_t start, delta;

#ifdef _KERNEL
	/*
	 * Annotate this process with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	//current->flags |= PF_NOFS;
#endif /* _KERNEL */

	txg_thread_enter(tx, &cpr);

	vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);
	vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout;
		uint64_t txg;

		timeout = zfs_txg_timeout * hz;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0 &&
		    dp->dp_dirty_total < zfs_dirty_data_sync) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting) {
			kmem_free(vs2, sizeof (vdev_stat_t));
			kmem_free(vs1, sizeof (vdev_stat_t));
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
		}

		vdev_get_stats(spa->spa_root_vdev, vs1);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC,
		    gethrtime());

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);

		vdev_get_stats(spa->spa_root_vdev, vs2);
		spa_txg_history_set_io(spa, txg,
		    vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
		    vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
		    vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
		    vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
		    dp->dp_dirty_pertxg[txg & TXG_MASK]);
		spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
	}
}
Example #10
void
sda_slot_thread(void *arg)
{
	sda_slot_t	*slot = arg;

	for (;;) {
		sda_cmd_t	*cmdp;
		boolean_t	datline;
		sda_err_t	rv;

		mutex_enter(&slot->s_evlock);

		/*
		 * Process any abort list first.
		 */
		if ((cmdp = list_head(&slot->s_abortlist)) != NULL) {
			list_remove(&slot->s_abortlist, cmdp);
			mutex_exit(&slot->s_evlock);
			/*
			 * EOK used here, to avoid clobbering previous
			 * error code.
			 */
			sda_cmd_notify(cmdp, SDA_CMDF_BUSY | SDA_CMDF_DAT,
			    SDA_EOK);
			continue;
		}

		if (slot->s_detach) {
			/* Parent is detaching the slot, bail out. */
			break;
		}

		if ((slot->s_suspend) && (slot->s_xfrp == NULL)) {
			/*
			 * Host wants to suspend, but don't do it if
			 * we have a transfer outstanding.
			 */
			break;
		}

		if (slot->s_detect) {
			slot->s_detect = B_FALSE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_detect(slot);
			continue;
		}

		if (slot->s_xfrdone) {
			sda_err_t	errno;

			errno = slot->s_errno;
			slot->s_errno = SDA_EOK;
			slot->s_xfrdone = B_FALSE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_transfer(slot, errno);
			continue;
		}

		if (slot->s_fault != SDA_FAULT_NONE) {
			sda_fault_t	fault;

			fault = slot->s_fault;
			slot->s_fault = SDA_FAULT_NONE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_fault(slot, fault);
			continue;
		}

		if ((slot->s_xfrp != NULL) && (gethrtime() > slot->s_xfrtmo)) {
			/*
			 * The device stalled processing the data request.
			 * At this point, we really have no choice but to
			 * nuke the request, and flag a fault.
			 */
			mutex_exit(&slot->s_evlock);
			sda_slot_handle_transfer(slot, SDA_ETIME);
			sda_slot_fault(slot, SDA_FAULT_TIMEOUT);
			continue;
		}

		/*
		 * If the slot has suspended, then we can't process
		 * any new commands yet.
		 */
		if ((slot->s_suspend) || (!slot->s_wake)) {

			/*
			 * We use a timed wait if we are waiting for a
			 * data transfer to complete.  Otherwise we
			 * skip the timed wait so as not to wake the CPU
			 * (power savings).
			 */

			if ((slot->s_xfrp != NULL) || (slot->s_reap)) {
				/* Wait 3 sec (reap attempts). */
				(void) cv_reltimedwait(&slot->s_evcv,
				    &slot->s_evlock, drv_usectohz(3000000),
				    TR_CLOCK_TICK);
			} else {
				(void) cv_wait(&slot->s_evcv, &slot->s_evlock);
			}

			mutex_exit(&slot->s_evlock);
			continue;
		}

		slot->s_wake = B_FALSE;

		mutex_exit(&slot->s_evlock);

		/*
		 * We're awake now, so look for work to do.  First
		 * acquire access to the slot.
		 */
		sda_slot_enter(slot);


		/*
		 * If no more commands to process, go back to sleep.
		 */
		if ((cmdp = list_head(&slot->s_cmdlist)) == NULL) {
			sda_slot_exit(slot);
			continue;
		}

		/*
		 * If the current command is not an initialization
		 * command, but we are initializing, go back to sleep.
		 * (This happens potentially during a card reset or
		 * suspend/resume cycle, where the card has not been
		 * removed, but a reset is in progress.)
		 */
		if (slot->s_init && !(cmdp->sc_flags & SDA_CMDF_INIT)) {
			sda_slot_exit(slot);
			continue;
		}

		datline = ((cmdp->sc_flags & SDA_CMDF_DAT) != 0);

		if (datline) {
			/*
			 * If the current command has a data phase
			 * while a transfer is in progress, then go
			 * back to sleep.
			 */
			if (slot->s_xfrp != NULL) {
				sda_slot_exit(slot);
				continue;
			}

			/*
			 * Note that APP_CMD doesn't have a data phase,
			 * although the associated ACMD might.
			 */
			if (cmdp->sc_index != CMD_APP_CMD) {
				slot->s_xfrp = cmdp;
				/*
				 * All commands should complete in
				 * less than 5 seconds.  The worst
				 * case is actually somewhere around 4
				 * seconds, but that is when the clock
				 * is only 100 kHz.
				 */
				slot->s_xfrtmo = gethrtime() +
				    5000000000ULL;
				(void) sda_setprop(slot, SDA_PROP_LED, 1);
			}
		}

		/*
		 * We're committed to dispatching this command now,
		 * so remove it from the list.
		 */
		list_remove(&slot->s_cmdlist, cmdp);

		/*
		 * There could be more commands after this one, so we
		 * mark ourself so we stay awake for another cycle.
		 */
		sda_slot_wakeup(slot);

		/*
		 * Submit the command.  Note that we are holding the
		 * slot lock here, so it is critical that the caller
		 * *not* call back up into the framework.  The caller
		 * must break context.  But doing it this way prevents
		 * a critical race on card removal.
		 *
		 * Note that we don't resubmit memory to the device if
		 * it isn't flagged as ready (e.g. if the wrong device
		 * was inserted!)
		 */
		if ((!slot->s_ready) && (cmdp->sc_flags & SDA_CMDF_MEM)) {
			rv = SDA_ENODEV;
		} else {
			rv = slot->s_ops.so_cmd(slot->s_prv, cmdp);
		}
		if (rv == SDA_EOK)
			rv = sda_slot_check_response(cmdp);

		if (rv == SDA_EOK) {
			/*
			 * If APP_CMD completed properly, then
			 * resubmit with ACMD index.  Note wake was
			 * already set above.
			 */
			if (cmdp->sc_index == CMD_APP_CMD) {
				if ((cmdp->sc_response[0] & R1_APP_CMD) == 0) {
					sda_slot_log(slot, "APP_CMD not set!");
				}
				sda_cmd_resubmit_acmd(slot, cmdp);
				sda_slot_exit(slot);

				continue;
			}

		} else if (datline) {
			/*
			 * If an error occurred and we were expecting
			 * a transfer phase, we have to clean up.
			 */
			(void) sda_setprop(slot, SDA_PROP_LED, 0);
			slot->s_xfrp = NULL;
			slot->s_xfrtmo = 0;

			/*
			 * And notify any waiter.
			 */
			sda_slot_exit(slot);
			sda_cmd_notify(cmdp, SDA_CMDF_BUSY | SDA_CMDF_DAT, rv);
			continue;
		}

		/*
		 * Wake any waiter.
		 */
		sda_slot_exit(slot);
		sda_cmd_notify(cmdp, SDA_CMDF_BUSY, rv);
	}

	mutex_exit(&slot->s_evlock);
}
Example #11
static void
do_fast_random_poll (void)
{
    static void (*fnc)( void (*)(const void*, size_t, int), int) = NULL;
    static int initialized = 0;

    assert (pool_is_locked);

    rndstats.fastpolls++;

    if (!initialized )
    {
        if (!is_initialized )
            initialize();
        initialized = 1;
        fnc = getfnc_fast_random_poll ();
    }

    if (fnc)
        (*fnc)( add_randomness, 1 );

    /* Continue with the generic functions. */
#if HAVE_GETHRTIME
    {
        hrtime_t tv;
        tv = gethrtime();
        add_randomness( &tv, sizeof(tv), 1 );
    }
#elif HAVE_GETTIMEOFDAY
    {
        struct timeval tv;
        if( gettimeofday( &tv, NULL ) )
            BUG();
        add_randomness( &tv.tv_sec, sizeof(tv.tv_sec), 1 );
        add_randomness( &tv.tv_usec, sizeof(tv.tv_usec), 1 );
    }
#elif HAVE_CLOCK_GETTIME
    {   struct timespec tv;
        if( clock_gettime( CLOCK_REALTIME, &tv ) == -1 )
            BUG();
        add_randomness( &tv.tv_sec, sizeof(tv.tv_sec), 1 );
        add_randomness( &tv.tv_nsec, sizeof(tv.tv_nsec), 1 );
    }
#else /* use times */
# ifndef HAVE_DOSISH_SYSTEM
    {   struct tms buf;
        times( &buf );
        add_randomness( &buf, sizeof buf, 1 );
    }
# endif
#endif

#ifdef HAVE_GETRUSAGE
# ifdef RUSAGE_SELF
    {
        struct rusage buf;
        /* QNX/Neutrino returns ENOSYS - so we just ignore it and
         * add whatever is in buf.  In a chroot environment it might not
         * work at all (i.e. because /proc/ is not accessible), so we had
         * better ignore all error codes and hope for the best.
         */
        getrusage (RUSAGE_SELF, &buf );
        add_randomness( &buf, sizeof buf, 1 );
        memset( &buf, 0, sizeof buf );
    }
# else /*!RUSAGE_SELF*/
#  ifdef __GCC__
#   warning There is no RUSAGE_SELF on this system
#  endif
# endif /*!RUSAGE_SELF*/
#endif /*HAVE_GETRUSAGE*/

    /* time and clock are available on all systems - so we'd better use
       them, just in case one of the above functions didn't work */
    {
        time_t x = time(NULL);
        add_randomness( &x, sizeof(x), 1 );
    }
    {
        clock_t x = clock();
        add_randomness( &x, sizeof(x), 1 );
    }
}
Example #12
File: sunos.c Project: 0x00A/uvxx
uint64_t uv_hrtime(void) {
  return (gethrtime());
}
Example #13
static boolean_t
pcn_send(pcn_t *pcnp, mblk_t *mp)
{
	size_t		len;
	pcn_buf_t	*txb;
	pcn_tx_desc_t	*tmd;
	int		txsend;

	ASSERT(mutex_owned(&pcnp->pcn_xmtlock));
	ASSERT(mp != NULL);

	len = msgsize(mp);
	if (len > ETHERVLANMTU) {
		pcnp->pcn_macxmt_errors++;
		freemsg(mp);
		return (B_TRUE);
	}

	if (pcnp->pcn_txavail < PCN_TXRECLAIM)
		pcn_reclaim(pcnp);

	if (pcnp->pcn_txavail == 0) {
		pcnp->pcn_wantw = B_TRUE;

		/* enable tx interrupt */
		PCN_CSR_SETBIT(pcnp, PCN_CSR_EXTCTL1, PCN_EXTCTL1_LTINTEN);
		return (B_FALSE);
	}

	txsend = pcnp->pcn_txsend;

	/*
	 * We copy the packet to a single buffer.  NetBSD sources suggest
	 * that if multiple segments are ever used, VMware has a bug that will
	 * only allow 8 segments to be used, while the physical chips allow 16.
	 */
	txb = pcnp->pcn_txbufs[txsend];
	mcopymsg(mp, txb->pb_buf);	/* frees mp! */

	pcnp->pcn_opackets++;
	pcnp->pcn_obytes += len;
	if (txb->pb_buf[0] & 0x1) {
		if (bcmp(txb->pb_buf, pcn_broadcast, ETHERADDRL) != 0)
			pcnp->pcn_multixmt++;
		else
			pcnp->pcn_brdcstxmt++;
	}

	tmd = &pcnp->pcn_txdescp[txsend];

	SYNCBUF(txb, len, DDI_DMA_SYNC_FORDEV);
	tmd->pcn_txstat = 0;
	tmd->pcn_tbaddr = txb->pb_paddr;

	/* PCNet wants the 2's complement of the length of the buffer */
	tmd->pcn_txctl = (~(len) + 1) & PCN_TXCTL_BUFSZ;
	tmd->pcn_txctl |= PCN_TXCTL_MBO;
	tmd->pcn_txctl |= PCN_TXCTL_STP | PCN_TXCTL_ENP | PCN_TXCTL_ADD_FCS |
	    PCN_TXCTL_OWN | PCN_TXCTL_MORE_LTINT;

	SYNCTXDESC(pcnp, txsend, DDI_DMA_SYNC_FORDEV);

	pcnp->pcn_txavail--;
	pcnp->pcn_txsend = (txsend + 1) % PCN_TXRING;
	pcnp->pcn_txstall_time = gethrtime() + (5 * 1000000000ULL);

	pcn_csr_write(pcnp, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);

	return (B_TRUE);
}
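
The pcn_txstall_time deadline armed above is only half of a watchdog: some periodic routine has to compare gethrtime() against it and recover. A sketch of what such a check could look like, using only fields visible in this example (the recovery action is a placeholder):

static void
pcn_check_txstall(pcn_t *pcnp)
{
	mutex_enter(&pcnp->pcn_xmtlock);
	if ((pcnp->pcn_txavail < PCN_TXRING) &&
	    (gethrtime() > pcnp->pcn_txstall_time)) {
		/* no descriptor was reclaimed within the 5 s window */
		pcnp->pcn_macxmt_errors++;
		/* ... reset the chip and requeue pending packets ... */
	}
	mutex_exit(&pcnp->pcn_xmtlock);
}
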
Example #14
LIBCOUCHBASE_API
lcb_error_t lcb_get_replica(lcb_t instance,
                            const void *command_cookie,
                            lcb_size_t num,
                            const lcb_get_replica_cmd_t *const *items)
{
    lcb_server_t *server;
    protocol_binary_request_get req;
    int vb, idx;
    lcb_size_t ii, *affected_servers = NULL;

    /* we need a vbucket config before we can start getting data.. */
    if (instance->vbucket_config == NULL) {
        switch (instance->type) {
        case LCB_TYPE_CLUSTER:
            return lcb_synchandler_return(instance, LCB_EBADHANDLE);
        case LCB_TYPE_BUCKET:
        default:
            return lcb_synchandler_return(instance, LCB_CLIENT_ETMPFAIL);
        }
    }

    affected_servers = calloc(instance->nservers, sizeof(lcb_size_t));
    if (affected_servers == NULL) {
        return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
    }
    memset(&req, 0, sizeof(req));
    req.message.header.request.magic = PROTOCOL_BINARY_REQ;
    req.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
    req.message.header.request.opcode = CMD_GET_REPLICA;
    for (ii = 0; ii < num; ++ii) {
        const void *key;
        lcb_size_t nkey;
        int r0, r1;
        lcb_replica_t strategy;
        struct lcb_command_data_st ct;

        memset(&ct, 0, sizeof(struct lcb_command_data_st));
        ct.start = gethrtime();
        ct.cookie = command_cookie;
        strategy = LCB_REPLICA_FIRST;
        r0 = 0; /* begin */
        r1 = 0; /* end */

        switch (items[ii]->version) {
        case 0:
            key = items[ii]->v.v0.key;
            nkey = items[ii]->v.v0.nkey;
            break;
        case 1:
            key = items[ii]->v.v1.key;
            nkey = items[ii]->v.v1.nkey;
            strategy = items[ii]->v.v1.strategy;
            switch (strategy) {
            case LCB_REPLICA_FIRST:
                r0 = r1 = 0;
                /* iterate replicas in a sequence until first
                 * successful response */
                ct.replica = 0;
                break;
            case LCB_REPLICA_SELECT:
                r0 = r1 = items[ii]->v.v1.index;
                if (r0 >= instance->nreplicas) {
                    free(affected_servers);
                    return lcb_synchandler_return(instance, LCB_EINVAL);
                }
                ct.replica = -1; /* do not iterate */
                break;
            case LCB_REPLICA_ALL:
                r0 = 0;
                r1 = instance->nreplicas;
                ct.replica = -1; /* do not iterate */
                break;
            }
            break;
        default:
            free(affected_servers);
            return lcb_synchandler_return(instance, LCB_EINVAL);
        }

        do {
            vb = vbucket_get_vbucket_by_key(instance->vbucket_config,
                                            key, nkey);
            idx = vbucket_get_replica(instance->vbucket_config, vb, r0);
            if (idx < 0 || idx > (int)instance->nservers) {
                free(affected_servers);
                /* FIXME: once the 'packet' patch is applied, all
                 * previously queued commands should be rolled back
                 * here */
                return lcb_synchandler_return(instance, LCB_NO_MATCHING_SERVER);
            }
            affected_servers[idx]++;
            server = instance->servers + idx;
            req.message.header.request.keylen = ntohs((lcb_uint16_t)nkey);
            req.message.header.request.vbucket = ntohs((lcb_uint16_t)vb);
            req.message.header.request.bodylen = ntohl((lcb_uint32_t)nkey);
            req.message.header.request.opaque = ++instance->seqno;

            lcb_server_start_packet_ex(server, &ct, req.bytes,
                                       sizeof(req.bytes));
            lcb_server_write_packet(server, key, nkey);
            lcb_server_end_packet(server);
            ++r0;
        } while (r0 < r1);
    }

    for (ii = 0; ii < instance->nservers; ++ii) {
        if (affected_servers[ii]) {
            server = instance->servers + ii;
            lcb_server_send_packets(server);
        }
    }

    free(affected_servers);
    return lcb_synchandler_return(instance, LCB_SUCCESS);
}
Example #15
asmlinkage void sys_timerReset_k(struct spark_timer *ptimer)
{
  ptimer->start = gethrtime();
  ptimer->msec = 0;
  return;
}
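
A timer reset this way would be read back by differencing against a fresh gethrtime() sample. A hypothetical companion call in the same style (sys_timerRead_k is an invented name; the original's read path is not shown):

asmlinkage unsigned long sys_timerRead_k(struct spark_timer *ptimer)
{
  /* elapsed time since the last reset; gethrtime() counts nanoseconds */
  ptimer->msec = (gethrtime() - ptimer->start) / 1000000;
  return ptimer->msec;
}
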
Example #16
/*
 * Takes a "snapshot" of the global statistics. Actually, it calculates
 * them from the local statistics maintained by each flowop.
 * First the routine pauses filebench, then rolls the statistics for
 * each flowop into its associated FLOW_MASTER flowop.
 * Next all the FLOW_MASTER flowops' statistics are written
 * to the log file followed by the global totals. Then filebench
 * operation is allowed to resume.
 */
void
stats_snap(void)
{
	struct flowstats *iostat = &globalstats[FLOW_TYPE_IO];
	struct flowstats *aiostat = &globalstats[FLOW_TYPE_AIO];
	hrtime_t orig_starttime;
	flowop_t *flowop;
	char *str;
	double total_time_sec;

	if (!globalstats) {
		filebench_log(LOG_ERROR,
		    "'stats snap' called before 'stats clear'");
		return;
	}

	/* don't print out if run ended in error */
	if (filebench_shm->shm_f_abort == FILEBENCH_ABORT_ERROR) {
		filebench_log(LOG_ERROR,
		    "NO VALID RESULTS! Filebench run terminated prematurely");
		return;
	}

	/* Freeze statistics during update */
	filebench_shm->shm_bequiet = 1;

	/* We want to have blank global statistics each
	 * time we start the summation process, but the
	 * statistics collection start time must remain
	 * unchanged (it's a snapshot compared to the original
	 * start time). */
	orig_starttime = globalstats->fs_stime;
	(void) memset(globalstats, 0, FLOW_TYPES * sizeof(struct flowstats));
	globalstats->fs_stime = orig_starttime;
	globalstats->fs_etime = gethrtime();

	total_time_sec = (globalstats->fs_etime -
			globalstats->fs_stime) / SEC2NS_FLOAT;
	filebench_log(LOG_DEBUG_SCRIPT, "Stats period = %.0f sec",
			total_time_sec);

	/* Similarly we blank the master flowop statistics */
	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {
		if (flowop->fo_instance == FLOW_MASTER) {
			(void) memset(&flowop->fo_stats, 0, sizeof(struct flowstats));
			flowop->fo_stats.fs_minlat = ULLONG_MAX;
		}
		flowop = flowop->fo_next;
	}

	/* Roll up per-flowop statistics in globalstats and master flowops */
	flowop = filebench_shm->shm_flowoplist;
	while (flowop) {
		flowop_t *flowop_master;

		if (flowop->fo_instance <= FLOW_DEFINITION) {
			flowop = flowop->fo_next;
			continue;
		}

		/* Roll up per-flowop into global stats */
		stats_add(&globalstats[flowop->fo_type], &flowop->fo_stats);
		stats_add(&globalstats[FLOW_TYPE_GLOBAL], &flowop->fo_stats);

		flowop_master = flowop_find_one(flowop->fo_name, FLOW_MASTER);
		if (flowop_master) {
			/* Roll up per-flowop stats into master */
			stats_add(&flowop_master->fo_stats, &flowop->fo_stats);
		} else {
			filebench_log(LOG_DEBUG_NEVER,
			    "flowop_stats could not find %s",
			    flowop->fo_name);
		}

		filebench_log(LOG_DEBUG_SCRIPT,
		    "flowop %-20s-%4d  - %5d ops %5.1lf ops/sec %5.1lfmb/s "
		    "%8.3fms/op",
		    flowop->fo_name,
		    flowop->fo_instance,
		    flowop->fo_stats.fs_count,
		    flowop->fo_stats.fs_count / total_time_sec,
		    (flowop->fo_stats.fs_bytes / MB_FLOAT) / total_time_sec,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_total_lat /
		    (flowop->fo_stats.fs_count * SEC2MS_FLOAT) : 0);

		flowop = flowop->fo_next;

	}

	flowop = filebench_shm->shm_flowoplist;
	str = malloc(1048576);
	*str = '\0';
	(void) strcpy(str, "Per-Operation Breakdown\n");
	while (flowop) {
		char line[1024];
		char histogram[1024];
		char hist_reading[20];
		int i = 0;

		if (flowop->fo_instance != FLOW_MASTER) {
			flowop = flowop->fo_next;
			continue;
		}

		(void) snprintf(line, sizeof(line), "%-20s %dops %8.0lfops/s "
		    "%5.1lfmb/s %8.1fms/op",
		    flowop->fo_name,
		    flowop->fo_stats.fs_count,
		    flowop->fo_stats.fs_count / total_time_sec,
		    (flowop->fo_stats.fs_bytes / MB_FLOAT) / total_time_sec,
		    flowop->fo_stats.fs_count ?
		    flowop->fo_stats.fs_total_lat /
		    (flowop->fo_stats.fs_count * SEC2MS_FLOAT) : 0);
		(void) strcat(str, line);

		(void) snprintf(line, sizeof(line)," [%.2fms - %5.2fms]",
			flowop->fo_stats.fs_minlat / SEC2MS_FLOAT,
			flowop->fo_stats.fs_maxlat / SEC2MS_FLOAT);
		(void) strcat(str, line);

		if (filebench_shm->lathist_enabled) {
			(void) sprintf(histogram, "\t[ ");
			for (i = 0; i < OSPROF_BUCKET_NUMBER; i++) {
				(void) sprintf(hist_reading, "%lu ",
				flowop->fo_stats.fs_distribution[i]);
				(void) strcat(histogram, hist_reading);
			}
			(void) strcat(histogram, "]\n");
			(void) strcat(str, histogram);
		} else
			(void) strcat(str, "\n");

		flowop = flowop->fo_next;
	}

	/* remove the trailing \n */
	str[strlen(str) - 1] = '\0';

	filebench_log(LOG_INFO, "%s", str);
	free(str);

	filebench_log(LOG_INFO,
	    "IO Summary: %5d ops %5.3lf ops/s %0.0lf/%0.0lf rd/wr "
	    "%5.1lfmb/s %5.1fms/op",
	    iostat->fs_count + aiostat->fs_count,
	    (iostat->fs_count + aiostat->fs_count) / total_time_sec,
	    (iostat->fs_rcount + aiostat->fs_rcount) / total_time_sec,
	    (iostat->fs_wcount + aiostat->fs_wcount) / total_time_sec,
	    ((iostat->fs_bytes + aiostat->fs_bytes) / MB_FLOAT)
						/ total_time_sec,
	    (iostat->fs_rcount + iostat->fs_wcount) ?
	    iostat->fs_total_lat /
	    ((iostat->fs_rcount + iostat->fs_wcount) * SEC2MS_FLOAT) : 0);

	filebench_shm->shm_bequiet = 0;
}
Example #17
static lcb_error_t
do_store3(lcb_t instance, const void *cookie,
    const lcb_CMDBASE *cmd, int is_durstore)
{
    mc_PIPELINE *pipeline;
    mc_PACKET *packet;
    mc_REQDATA *rdata;
    mc_CMDQUEUE *cq = &instance->cmdq;
    int hsize;
    int should_compress = 0;
    lcb_error_t err;

    lcb_storage_t operation;
    lcb_U32 flags;
    const lcb_VALBUF *vbuf;
    lcb_datatype_t datatype;

    protocol_binary_request_set scmd;
    protocol_binary_request_header *hdr = &scmd.message.header;

    if (!is_durstore) {
        const lcb_CMDSTORE *simple_cmd = (const lcb_CMDSTORE *)cmd;
        operation = simple_cmd->operation;
        flags = simple_cmd->flags;
        vbuf = &simple_cmd->value;
        datatype = simple_cmd->datatype;
    } else {
        const lcb_CMDSTOREDUR *durcmd = (const lcb_CMDSTOREDUR *)cmd;
        operation = durcmd->operation;
        flags = durcmd->flags;
        vbuf = &durcmd->value;
        datatype = durcmd->datatype;
    }

    if (LCB_KEYBUF_IS_EMPTY(&cmd->key)) {
        return LCB_EMPTY_KEY;
    }

    err = get_esize_and_opcode(
        operation, &hdr->request.opcode, &hdr->request.extlen);
    if (err != LCB_SUCCESS) {
        return err;
    }

    switch (operation) {
    case LCB_APPEND:
    case LCB_PREPEND:
        if (cmd->exptime || flags) {
            return LCB_OPTIONS_CONFLICT;
        }
        break;
    case LCB_ADD:
        if (cmd->cas) {
            return LCB_OPTIONS_CONFLICT;
        }
        break;
    default:
        break;
    }

    hsize = hdr->request.extlen + sizeof(*hdr);

    err = mcreq_basic_packet(cq, (const lcb_CMDBASE *)cmd, hdr,
        hdr->request.extlen, &packet, &pipeline, MCREQ_BASICPACKET_F_FALLBACKOK);

    if (err != LCB_SUCCESS) {
        return err;
    }

    should_compress = can_compress(instance, pipeline, vbuf, datatype);
    if (should_compress) {
        int rv = mcreq_compress_value(pipeline, packet, &vbuf->u_buf.contig);
        if (rv != 0) {
            mcreq_release_packet(pipeline, packet);
            return LCB_CLIENT_ENOMEM;
        }
    } else {
        mcreq_reserve_value(pipeline, packet, vbuf);
    }

    if (is_durstore) {
        int duropts = 0;
        lcb_U16 persist_u, replicate_u;
        const lcb_CMDSTOREDUR *dcmd = (const lcb_CMDSTOREDUR *)cmd;
        DURSTORECTX *dctx = calloc(1, sizeof(*dctx));

        if (dctx == NULL) {
            mcreq_wipe_packet(pipeline, packet);
            mcreq_release_packet(pipeline, packet);
            return LCB_CLIENT_ENOMEM;
        }

        persist_u = dcmd->persist_to;
        replicate_u = dcmd->replicate_to;
        if (dcmd->replicate_to == -1 || dcmd->persist_to == -1) {
            duropts = LCB_DURABILITY_VALIDATE_CAPMAX;
        }

        err = lcb_durability_validate(instance, &persist_u, &replicate_u, duropts);
        if (err != LCB_SUCCESS) {
            mcreq_wipe_packet(pipeline, packet);
            mcreq_release_packet(pipeline, packet);
            return err;
        }

        dctx->instance = instance;
        dctx->persist_to = persist_u;
        dctx->replicate_to = replicate_u;
        packet->u_rdata.exdata = &dctx->base;
        packet->flags |= MCREQ_F_REQEXT;

        dctx->base.cookie = cookie;
        dctx->base.procs = &storedur_procs;
    }

    rdata = MCREQ_PKT_RDATA(packet);
    rdata->cookie = cookie;
    rdata->start = gethrtime();

    scmd.message.body.expiration = htonl(cmd->exptime);
    scmd.message.body.flags = htonl(flags);
    hdr->request.magic = PROTOCOL_BINARY_REQ;
    hdr->request.cas = cmd->cas;
    hdr->request.datatype = PROTOCOL_BINARY_RAW_BYTES;

    if (should_compress || (datatype & LCB_VALUE_F_SNAPPYCOMP)) {
        hdr->request.datatype |= PROTOCOL_BINARY_DATATYPE_COMPRESSED;
    }
    if (datatype & LCB_VALUE_F_JSON) {
        hdr->request.datatype |= PROTOCOL_BINARY_DATATYPE_JSON;
    }

    hdr->request.opaque = packet->opaque;
    hdr->request.bodylen = htonl(
            hdr->request.extlen + ntohs(hdr->request.keylen)
            + get_value_size(packet));

    memcpy(SPAN_BUFFER(&packet->kh_span), scmd.bytes, hsize);
    mcreq_sched_add(pipeline, packet);
    TRACE_STORE_BEGIN(hdr, (lcb_CMDSTORE* )cmd);
    return LCB_SUCCESS;
}
Example #18
static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;
	hrtime_t time;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	time = gethrtime();
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);

	merge_before = (ts_before != NULL && ts_before->ts_end == start);
	merge_after = (ts_after != NULL && ts_after->ts_start == end);

	if (merge_before && merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - ts_before->ts_end);
		TRIM_MAP_QDEC(tm);
		avl_remove(&tm->tm_queued_frees, ts_before);
		list_remove(&tm->tm_head, ts_before);
		ts_after->ts_start = ts_before->ts_start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		TRIM_MAP_SINC(tm, end - ts_before->ts_end);
		ts_before->ts_end = end;
		ts_before->ts_txg = txg;
		ts_before->ts_time = time;
	} else if (merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - start);
		ts_after->ts_start = start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
	} else {
		TRIM_MAP_SINC(tm, end - start);
		TRIM_MAP_QINC(tm);
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		ts->ts_time = time;
		avl_insert(&tm->tm_queued_frees, ts, where);
		list_insert_tail(&tm->tm_head, ts);
	}
}
Example #19
int main(int argc, char* argv[]) {
  Options* args = arg_parse(argc, argv);
  int matches = 0;
  float time = 0;

  if(args == NULL) {
    fprintf(stderr, "usage: %s [-d] buffers outer_r inner_r\n", argv[0]);
    exit(EXIT_FAILURE);
  }

  /* we need two buffers for IO and at least one for blocks */
  if(args->buffers < 3) {
    fprintf(stderr, "error: need at least 3 buffers\n");
    exit(EXIT_FAILURE);
  }
  int block_size = args->buffers - 2;

  FILE *fp_inner, *fp_outer;
  /* get a file pointer to inner relation, exit if unable */
  if(!e_fopen(args->outer_file, &fp_outer, "r")) {
    fprintf(stderr, "failed to open %s\n", args->outer_file);
    exit(EXIT_FAILURE);
  }
  if(!e_fopen(args->inner_file, &fp_inner, "r")) {
    fprintf(stderr, "failed to open %s\n", args->inner_file);
    exit(EXIT_FAILURE);
  }
  
  /* determine which relation is the outer and setup variables accordingly */
  int is_outer_char = is_char_relation(fp_outer);
  int o_records_pp, i_records_pp;
  if(is_outer_char) {
    o_records_pp = CHAR_RECORDS_PP;
    i_records_pp = GUILD_RECORDS_PP;
  } else {
    o_records_pp = GUILD_RECORDS_PP;
    i_records_pp = CHAR_RECORDS_PP;
  }
  #ifdef DO_TIME
    hrtime_t start, end;
    start = gethrtime();
  #endif
  Hashtable* htable = hash_init(HASH_SIZE);
  /* for each block of outer relation, scan inner relation */
  while(!feof(fp_outer)) {
    /* read in a block from the outer file, where a block is the number
     * of available buffers; each page is a buffer and each page can
     * hold 2 records. */
    Page** block = read_block(block_size, fp_outer, is_outer_char);

    /* hash the block for faster comparisons */
    hash_block(htable, block, block_size, o_records_pp, is_outer_char);

    /* scan inner relation */
    while(!feof(fp_inner)) {
      /* read one page into the input buffer */
      Page* p = read_page(i_records_pp, fp_inner, !is_outer_char);
      for(int i = 0; i < i_records_pp; i++) {
        Character* c = NULL;
        Guild* g = NULL;
        Entry* e = NULL;
        /* we have either a character or guild from outer, so get the 
           one we don't from the hashtable */
        if(is_outer_char) {
          if(!p->g[i])
            continue;
          g = p->g[i];
          e = hash_lookup(htable, g->guild_id);
        } else {
          if(!p->c[i])
            continue;
          c = p->c[i];
          e = hash_lookup(htable, c->guild_id);
        }

        /* check all possible matches */
        while(e != NULL) {
          if(is_outer_char)
            c = (Character*)e->value;
          else
            g = (Guild*)e->value;
          
          /* compare guild id's for match */
          if(g->guild_id == c->guild_id) {
            matches++;
            /* output the tuple if -d flag set */
            if(args->dflag) {
              if(is_outer_char) {
                printf("%d,%s,%d,%d,%d,%s\n", c->guild_id, c->name, c->race, 
                    c->class, c->id, g->g_name);
              } else {
                printf("%d,%s,%s,%d,%d,%d\n", g->guild_id, g->g_name, c->name, 
                    c->race, c->class, c->id);
              }
            }
          }
          /* using chaining, so get next possible match */
          e = e->next;
        }
      }
      /* deallocate page */
      free_page(p, i_records_pp, !is_outer_char);
    }
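
The listing is truncated here, before the DO_TIME block opened above is closed. The usual epilogue for such a start/end pair is a single subtraction; a sketch of that tail (not the program's actual ending, which is cut off):

  #ifdef DO_TIME
    end = gethrtime();
    /* hrtime_t counts nanoseconds; report seconds */
    printf("join completed in %.3f s, %d matches\n",
        (double)(end - start) / 1e9, matches);
  #endif
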
Example #20
int
main(int argc, char **argv)
{
	boolean_t	is_daemon  = B_TRUE;
	boolean_t	is_verbose;
	int		ipc_fd;
	int		c;
	int		aware = RTAW_UNDER_IPMP;
	struct rlimit	rl;

	debug_level = df_get_int("", B_FALSE, DF_DEBUG_LEVEL);
	is_verbose = df_get_bool("", B_FALSE, DF_VERBOSE);

	/*
	 * -l is ignored for compatibility with old agent.
	 */

	while ((c = getopt(argc, argv, "vd:l:fa")) != EOF) {

		switch (c) {

		case 'a':
			do_adopt = B_TRUE;
			grandparent = getpid();
			break;

		case 'd':
			debug_level = strtoul(optarg, NULL, 0);
			break;

		case 'f':
			is_daemon = B_FALSE;
			break;

		case 'v':
			is_verbose = B_TRUE;
			break;

		case '?':
			(void) fprintf(stderr, "usage: %s [-a] [-d n] [-f] [-v]"
			    "\n", argv[0]);
			return (EXIT_FAILURE);

		default:
			break;
		}
	}

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (geteuid() != 0) {
		dhcpmsg_init(argv[0], B_FALSE, is_verbose, debug_level);
		dhcpmsg(MSG_ERROR, "must be super-user");
		dhcpmsg_fini();
		return (EXIT_FAILURE);
	}

	if (is_daemon && daemonize() == 0) {
		dhcpmsg_init(argv[0], B_FALSE, is_verbose, debug_level);
		dhcpmsg(MSG_ERR, "cannot become daemon, exiting");
		dhcpmsg_fini();
		return (EXIT_FAILURE);
	}

	/*
	 * Seed the random number generator, since we're going to need it
	 * to set transaction id's and for exponential backoff.
	 */
	srand48(gethrtime() ^ gethostid() ^ getpid());

	dhcpmsg_init(argv[0], is_daemon, is_verbose, debug_level);
	(void) atexit(dhcpmsg_fini);

	tq = iu_tq_create();
	eh = iu_eh_create();

	if (eh == NULL || tq == NULL) {
		errno = ENOMEM;
		dhcpmsg(MSG_ERR, "cannot create timer queue or event handler");
		return (EXIT_FAILURE);
	}

	/*
	 * ignore most signals that could be reasonably generated.
	 */

	(void) signal(SIGTERM, graceful_shutdown);
	(void) signal(SIGQUIT, graceful_shutdown);
	(void) signal(SIGPIPE, SIG_IGN);
	(void) signal(SIGUSR1, SIG_IGN);
	(void) signal(SIGUSR2, SIG_IGN);
	(void) signal(SIGINT,  SIG_IGN);
	(void) signal(SIGHUP,  SIG_IGN);
	(void) signal(SIGCHLD, SIG_IGN);

	/*
	 * upon SIGTHAW we need to refresh any non-infinite leases.
	 */

	(void) iu_eh_register_signal(eh, SIGTHAW, refresh_smachs, NULL);

	class_id = get_class_id();
	if (class_id != NULL)
		class_id_len = strlen(class_id);
	else
		dhcpmsg(MSG_WARNING, "get_class_id failed, continuing "
		    "with no vendor class id");

	/*
	 * the inactivity timer is enabled any time there are no
	 * interfaces under DHCP control.  if DHCP_INACTIVITY_WAIT
	 * seconds transpire without an interface under DHCP control,
	 * the agent shuts down.
	 */

	inactivity_id = iu_schedule_timer(tq, DHCP_INACTIVITY_WAIT,
	    inactivity_shutdown, NULL);

	/*
	 * max out the number of available descriptors, just in case.
	 */

	rl.rlim_cur = RLIM_INFINITY;
	rl.rlim_max = RLIM_INFINITY;
	if (setrlimit(RLIMIT_NOFILE, &rl) == -1)
		dhcpmsg(MSG_ERR, "setrlimit failed");

	(void) enable_extended_FILE_stdio(-1, -1);

	/*
	 * Create and bind default IP sockets used to control interfaces and to
	 * catch stray packets.
	 */

	if (!dhcp_ip_default())
		return (EXIT_FAILURE);

	/*
	 * create the ipc channel that the agent will listen for
	 * requests on, and register it with the event handler so that
	 * `accept_event' will be called back.
	 */

	switch (dhcp_ipc_init(&ipc_fd)) {

	case 0:
		break;

	case DHCP_IPC_E_BIND:
		dhcpmsg(MSG_ERROR, "dhcp_ipc_init: cannot bind to port "
		    "%i (agent already running?)", IPPORT_DHCPAGENT);
		return (EXIT_FAILURE);

	default:
		dhcpmsg(MSG_ERROR, "dhcp_ipc_init failed");
		return (EXIT_FAILURE);
	}

	if (iu_register_event(eh, ipc_fd, POLLIN, accept_event, 0) == -1) {
		dhcpmsg(MSG_ERR, "cannot register ipc fd for messages");
		return (EXIT_FAILURE);
	}

	/*
	 * Create the global routing socket.  This is used for monitoring
	 * interface transitions, so that we learn about the kernel's Duplicate
	 * Address Detection status, and for inserting and removing default
	 * routes as learned from DHCP servers.  Both v4 and v6 are handled
	 * with this one socket.
	 */
	rtsock_fd = socket(PF_ROUTE, SOCK_RAW, 0);
	if (rtsock_fd == -1) {
		dhcpmsg(MSG_ERR, "cannot open routing socket");
		return (EXIT_FAILURE);
	}

	/*
	 * We're IPMP-aware and can manage IPMP test addresses, so issue
	 * RT_AWARE to get routing socket messages for interfaces under IPMP.
	 */
	if (setsockopt(rtsock_fd, SOL_ROUTE, RT_AWARE, &aware,
	    sizeof (aware)) == -1) {
		dhcpmsg(MSG_ERR, "cannot set RT_AWARE on routing socket");
		return (EXIT_FAILURE);
	}

	if (iu_register_event(eh, rtsock_fd, POLLIN, rtsock_event, 0) == -1) {
		dhcpmsg(MSG_ERR, "cannot register routing socket for messages");
		return (EXIT_FAILURE);
	}

	/*
	 * if the -a (adopt) option was specified, try to adopt the
	 * kernel-managed interface before we start.
	 */

	if (do_adopt && !dhcp_adopt())
		return (EXIT_FAILURE);

	/*
	 * For DHCPv6, we own all of the interfaces marked DHCPRUNNING.  As
	 * we're starting operation here, if there are any of those interfaces
	 * lingering around, they're strays, and need to be removed.
	 *
	 * It might be nice to save these addresses off somewhere -- for both
	 * v4 and v6 -- and use them as hints for later negotiation.
	 */
	remove_v6_strays();

	/*
	 * enter the main event loop; this is where all the real work
	 * takes place (through registering events and scheduling timers).
	 * this function only returns when the agent is shutting down.
	 */

	switch (iu_handle_events(eh, tq)) {

	case -1:
		dhcpmsg(MSG_WARNING, "iu_handle_events exited abnormally");
		break;

	case DHCP_REASON_INACTIVITY:
		dhcpmsg(MSG_INFO, "no interfaces to manage, shutting down...");
		break;

	case DHCP_REASON_TERMINATE:
		dhcpmsg(MSG_INFO, "received SIGTERM, shutting down...");
		break;

	case DHCP_REASON_SIGNAL:
		dhcpmsg(MSG_WARNING, "received unexpected signal, shutting "
		    "down...");
		break;
	}

	(void) iu_eh_unregister_signal(eh, SIGTHAW, NULL);

	iu_eh_destroy(eh);
	iu_tq_destroy(tq);

	return (EXIT_SUCCESS);
}
Example #21
static int load_cache(file_provider *provider)
{
    lcb_string str;
    char line[1024];
    lcb_ssize_t nr;
    int fail;
    FILE *fp = NULL;
    VBUCKET_CONFIG_HANDLE config = NULL;
    char *end;
    struct stat st;
    int status = -1;

    lcb_string_init(&str);

    if (provider->filename == NULL) {
        return -1;
    }

    fp = fopen(provider->filename, "r");
    if (fp == NULL) {
        LOG(provider, ERROR, "Couldn't open filename");
        return -1;
    }

    if (fstat(fileno(fp), &st)) {
        provider->last_errno = errno;
        goto GT_DONE;
    }

    if (provider->last_mtime == st.st_mtime) {
        LOG(provider, INFO, "Rejecting file. Modification time too old");
        goto GT_DONE;
    }

    config = vbucket_config_create();
    if (config == NULL) {
        goto GT_DONE;
    }

    while ((nr = fread(line, 1, sizeof(line), fp)) > 0) {
        if (lcb_string_append(&str, line, nr)) {
            goto GT_DONE;
        }
    }

    if (ferror(fp)) {
        goto GT_DONE;
    }

    fclose(fp);
    fp = NULL;

    if (!str.nused) {
        status = -1;
        goto GT_DONE;
    }

    end = strstr(str.base, CONFIG_CACHE_MAGIC);
    if (end == NULL) {
        LOG(provider, ERROR, "Couldn't find magic in file");
        remove(provider->filename);
        status = -1;
        goto GT_DONE;
    }

    fail = vbucket_config_parse(config, LIBVBUCKET_SOURCE_MEMORY, str.base);
    if (fail) {
        status = -1;
        LOG(provider, ERROR, "Couldn't parse configuration");
        remove(provider->filename);
        goto GT_DONE;
    }

    if (vbucket_config_get_distribution_type(config) != VBUCKET_DISTRIBUTION_VBUCKET) {
        status = -1;
        LOG(provider, ERROR, "Not applying cached memcached config");
        goto GT_DONE;
    }

    if (provider->config) {
        lcb_clconfig_decref(provider->config);
    }

    provider->config = lcb_clconfig_create(config,
                                           &str,
                                           LCB_CLCONFIG_FILE);
    provider->config->cmpclock = gethrtime();
    provider->config->origin = provider->base.type;
    provider->last_mtime = st.st_mtime;
    status = 0;
    config = NULL;

    GT_DONE:
    if (fp != NULL) {
        fclose(fp);
    }

    if (config != NULL) {
        vbucket_config_destroy(config);
    }

    lcb_string_release(&str);
    return status;
}
Example #22
0
int main(int argc, char** argv)
{
    /* Used to choose the instrumentation vehicle */
    enum { TIME, CLOCKGETTIME, GETRUSAGE, GETHRTIME };
    int n = 0;
    int c = 0;
    int flag = 0;
    int time_flag = 0;
    /* Parse command line arguments */
    while ((c = getopt( argc, argv, "gutrn:")) != -1)
	switch(c)
	{
	    case 'n':
		n = atoi(optarg);
		flag++;
		break;
	    case 't':
		time_flag = TIME;
		flag++;
		break;
	    case 'g':
		time_flag = CLOCKGETTIME;
		flag++;
		break;
	    case 'u':
		time_flag = GETRUSAGE;
		flag++;
		break;
	    case 'r':
/* gethrtime() is only available on Solaris (SVR4 + sun), so the -r
   flag is accepted only there.  This check should be semi-portable. */
#if defined (__SVR4) && defined (__sun)
		time_flag = GETHRTIME;
		flag++;
#else
		fprintf(stderr,"gethrtime() not available on this system.\n");
		usage();
#endif
		break;		
	    default: usage();
	}
    /* Make sure we got all the arguments we need */
    if(flag <= 1 || !n)
	usage();

    /* Calculate memory consumption.  This is one int for every entry in an 
       n X n array, times 3 (for a,b,result). */
    double memsize = (double) n * n * sizeof(int) * 3;
    char unit[3] = " B";	/* room for prefix, unit, and NUL terminator */
    if(memsize > 1024)
    {
	memsize = memsize/1024;
	unit[0] = 'K';
    }
    if(memsize > 1024)
    {
	memsize = memsize/1024;
	unit[0] = 'M';
    }
    if(memsize > 1024)
    {
	memsize = memsize/1024;
	unit[0] = 'G';
    }        
    printf("Allocating %.2f %s of memory.\n", memsize,unit);

    /* Allocate memory for the matrices.  They are stored
       as an array of pointers, each one pointing to an array itself. 
       These lines allocate memory for the 1st dimension of the table. */
    int ** a = (int **) malloc (sizeof(int *) * n);
    int ** b = (int **) malloc (sizeof(int *) * n);
    int ** r = (int **) malloc (sizeof(int *) * n);

    /* Allocate memory for the 2nd dimension of each array: one row of n
       ints for each of the n row pointers. */
    int i,j;
    for(i = 0; i < n; i++)
    {
	a[i] = (int*) malloc(sizeof(int) * n);
	b[i] = (int*) malloc(sizeof(int) * n);
	r[i] = (int*) malloc(sizeof(int) * n);
    }

    /* Populate the arrays (a,b) with random numbers, modulo 100.  This is
       done to try to keep the integer entries in the result array from
       overflowing. */
    srand(time(0));
    for(i = 0;  i < n; i++)
	for(j = 0; j < n; j++)
	{
	    a[i][j] = rand() % 100;
	    b[i][j] = rand() % 100;
	    r[i][j] = 0;
	}


    /* Timing code */
    struct timespec start_time,end_time,elapsed, user,kernel;
    struct rusage start_usage,end_usage;
    /* Here we just determine what the user's choice was for instrumentation. 
       How the resulting values get printed depend on the choice.  Some values have
       multiple components. */
    if(time_flag == CLOCKGETTIME)
    {
	clock_gettime(CLOCK_REALTIME, &start_time);
	/* Multiply the matrices */
	matrix_multiply(a,b,n,r); 
	clock_gettime(CLOCK_REALTIME, &end_time);
    }
    else if(time_flag == TIME)
    {
	start_time.tv_nsec = end_time.tv_nsec = 0;
	time(&start_time.tv_sec);
	/* Multiply the matrices */
	matrix_multiply(a,b,n,r); 
	time(&end_time.tv_sec);
    }
    else if(time_flag == GETRUSAGE)
    {
	getrusage(RUSAGE_SELF, &start_usage);
	/* Multiply the matrices */
	matrix_multiply(a,b,n,r); 
	getrusage(RUSAGE_SELF, &end_usage);
	user.tv_sec = end_usage.ru_utime.tv_sec - start_usage.ru_utime.tv_sec;
	user.tv_nsec = 1000*(end_usage.ru_utime.tv_usec - start_usage.ru_utime.tv_usec);
	kernel.tv_sec = end_usage.ru_stime.tv_sec - start_usage.ru_stime.tv_sec;
	kernel.tv_nsec = 1000*(end_usage.ru_stime.tv_usec - start_usage.ru_stime.tv_usec);
	/* Borrow a second if the microsecond difference went negative. */
	if(user.tv_nsec < 0) { user.tv_sec -= 1; user.tv_nsec += 1000000000; }
	if(kernel.tv_nsec < 0) { kernel.tv_sec -= 1; kernel.tv_nsec += 1000000000; }
	printf("%10ld.%09ld s user time\n",(long)user.tv_sec,user.tv_nsec);
	printf("%10ld.%09ld s kernel time\n",(long)kernel.tv_sec,kernel.tv_nsec);
    }
#if defined (__SVR4) && defined (__sun)
    else if(time_flag == GETHRTIME)
    {
	hrtime_t start = gethrtime();
	/* Multiply the matrices */
	matrix_multiply(a,b,n,r);
	hrtime_t end = gethrtime();
        printf("Elapsed time: %f s\n",((double)end - start)/1000000000.0);
    }
#endif
    if(time_flag != GETRUSAGE && time_flag != GETHRTIME)
    {
	elapsed.tv_sec = end_time.tv_sec - start_time.tv_sec;
	elapsed.tv_nsec = end_time.tv_nsec - start_time.tv_nsec;
	if(elapsed.tv_nsec < 0)
	{
	    elapsed.tv_sec -= 1;
	    elapsed.tv_nsec += 1000000000;
	}
	/* Print results */
	printf("Elapsed time: %ld.%ld \n", elapsed.tv_sec , elapsed.tv_nsec);
    }
    /* Print the first and last value in the array. */
    printf("Element [0][0] = %d\nElement [%d][%d] = %d\n",r[0][0],n,n,r[n-1][n-1]);
    exit(0); 
}
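/*
 * The example above calls matrix_multiply() and usage(), which are
 * defined elsewhere in the original program.  A minimal sketch of what
 * they might look like (hypothetical reconstructions, not the original
 * source; the program name in the usage string is invented):
 */
static void usage(void)
{
    fprintf(stderr, "usage: matmul -n <size> -t|-g|-u|-r\n");
    exit(1);
}

/* Naive O(n^3) multiply: r = a * b for two n x n matrices. */
static void matrix_multiply(int **a, int **b, int n, int **r)
{
    int i, j, k;
    for (i = 0; i < n; i++)
	for (j = 0; j < n; j++)
	    for (k = 0; k < n; k++)
		r[i][j] += a[i][k] * b[k][j];
}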
Example #23
0
/**
 * Returns the real time, in seconds, or -1.0 if an error occurred.
 *
 * Time is measured since an arbitrary and OS-dependent start time.
 * The returned real time is only useful for computing an elapsed time
 * between two calls to this function.
 */
double getRealTime( )
{
#if defined(_WIN32)
  FILETIME tm;
  ULONGLONG t;
#if defined(NTDDI_WIN8) && NTDDI_VERSION >= NTDDI_WIN8
  /* Windows 8, Windows Server 2012 and later. ---------------- */
  GetSystemTimePreciseAsFileTime( &tm );
#else
  /* Windows 2000 and later. ---------------------------------- */
  GetSystemTimeAsFileTime( &tm );
#endif
  t = ((ULONGLONG)tm.dwHighDateTime << 32) | (ULONGLONG)tm.dwLowDateTime;
  return (double)t / 10000000.0;

#elif (defined(__hpux) || defined(hpux)) || ((defined(__sun__) || defined(__sun) || defined(sun)) && (defined(__SVR4) || defined(__svr4__)))
  /* HP-UX, Solaris. ------------------------------------------ */
  return (double)gethrtime( ) / 1000000000.0;

#elif defined(__MACH__) && defined(__APPLE__)
  /* OSX. ----------------------------------------------------- */
  static double timeConvert = 0.0;
  if ( timeConvert == 0.0 )
    {
      mach_timebase_info_data_t timeBase;
      (void)mach_timebase_info( &timeBase );
      timeConvert = (double)timeBase.numer /
	(double)timeBase.denom /
	1000000000.0;
    }
  return (double)mach_absolute_time( ) * timeConvert;

#elif defined(_POSIX_VERSION)
  /* POSIX. --------------------------------------------------- */
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
  {
    struct timespec ts;
#if defined(CLOCK_MONOTONIC_PRECISE)
    /* BSD. --------------------------------------------- */
    const clockid_t id = CLOCK_MONOTONIC_PRECISE;
#elif defined(CLOCK_MONOTONIC_RAW)
    /* Linux. ------------------------------------------- */
    const clockid_t id = CLOCK_MONOTONIC_RAW;
#elif defined(CLOCK_HIGHRES)
    /* Solaris. ----------------------------------------- */
    const clockid_t id = CLOCK_HIGHRES;
#elif defined(CLOCK_MONOTONIC)
    /* AIX, BSD, Linux, POSIX, Solaris. ----------------- */
    const clockid_t id = CLOCK_MONOTONIC;
#elif defined(CLOCK_REALTIME)
    /* AIX, BSD, HP-UX, Linux, POSIX. ------------------- */
    const clockid_t id = CLOCK_REALTIME;
#else
    const clockid_t id = (clockid_t)-1;/* Unknown. */
#endif /* CLOCK_* */
    if ( id != (clockid_t)-1 && clock_gettime( id, &ts ) != -1 )
      return (double)ts.tv_sec +
	(double)ts.tv_nsec / 1000000000.0;
    /* Fall thru. */
  }
#endif /* _POSIX_TIMERS */

  /* AIX, BSD, Cygwin, HP-UX, Linux, OSX, POSIX, Solaris. ----- */
  struct timeval tm;
  gettimeofday( &tm, NULL );
  return (double)tm.tv_sec + (double)tm.tv_usec / 1000000.0;
#else
  return -1.0;/* Failed. */
#endif
}
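/*
 * A minimal usage sketch, assuming only getRealTime() as defined above;
 * do_work() is a hypothetical stand-in for whatever is being measured.
 * The absolute value is OS-dependent, so only the difference between
 * two calls is meaningful.
 */
#include <stdio.h>

extern void do_work(void);	/* hypothetical workload */

void time_do_work(void)
{
  double t0 = getRealTime();
  do_work();
  double t1 = getRealTime();
  printf("elapsed: %.9f s\n", t1 - t0);
}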
Example #24
0
double timer_() {
    return ( (double)gethrtime() / 1e9 );
}
Example #25
0
double SuperLU_timer_() {
    return ( (double)gethrtime() / 1e9 );
}
Example #26
0
/**
 * Restart the timer from the current point in time
 *
 * This function restarts a timer with the same interval that was
 * given to the timer_set() function. The timer will start at the
 * current time.
 *
 * \note A periodic timer will drift if this function is used to reset
 * it. For periodic timers, use the timer_reset() function instead.
 *
 * \param t A pointer to the timer.
 *
 * \sa timer_reset()
 */
void
timer_restart(struct spark_timer *t)
{
  t->start = gethrtime();
  t->msec = 0;
}
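/*
 * A sketch of the timer_reset() mentioned in the note above, assuming
 * the same struct spark_timer fields used in these examples (start,
 * interval).  Unlike timer_restart(), it advances the start point by
 * the previous interval rather than jumping to the current time, so a
 * periodic timer does not accumulate drift.
 */
void
timer_reset(struct spark_timer *t)
{
  t->start += t->interval;
}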
Example #27
0
/* ARGSUSED */
static int
tswtcl_create_action(ipp_action_id_t aid, nvlist_t **nvlpp, ipp_flags_t flags)
{
	nvlist_t *nvlp;
	tswtcl_data_t *tswtcl_data;
	tswtcl_cfg_t *cfg_parms;
	char *next_action;
	uint32_t bstats;
	int rc, rc2;

	nvlp = *nvlpp;
	*nvlpp = NULL;		/* nvlist should be NULL on return */


	if ((cfg_parms = kmem_alloc(TSWTCL_CFG_SZ, KM_NOSLEEP)) == NULL) {
		nvlist_free(nvlp);
		return (ENOMEM);
	}

	/* parse red next action name */
	if ((rc = nvlist_lookup_string(nvlp, TSWTCL_RED_ACTION_NAME,
	    &next_action)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action:invalid config, red action" \
		    " name missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}
	if ((cfg_parms->red_action = ipp_action_lookup(next_action))
	    == IPP_ACTION_INVAL) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: red action invalid\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (EINVAL);
	}

	/* parse yellow next action name */
	if ((rc = nvlist_lookup_string(nvlp, TSWTCL_YELLOW_ACTION_NAME,
	    &next_action)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action:invalid config, yellow " \
		    "action name missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}
	if ((cfg_parms->yellow_action = ipp_action_lookup(next_action))
	    == IPP_ACTION_INVAL) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: yellow action invalid\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (EINVAL);
	}

	/* parse green next action name */
	if ((rc = nvlist_lookup_string(nvlp, TSWTCL_GREEN_ACTION_NAME,
	    &next_action)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action:invalid config, green " \
		    "action name missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}
	if ((cfg_parms->green_action = ipp_action_lookup(next_action))
	    == IPP_ACTION_INVAL) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: green action invalid\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (EINVAL);
	}

	/* parse committed rate  - in bits / sec */
	if ((rc = nvlist_lookup_uint32(nvlp, TSWTCL_COMMITTED_RATE,
	    &cfg_parms->committed_rate)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: invalid config, "\
		    " committed rate missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}

	/* parse peak rate  - in bits / sec */
	if ((rc = nvlist_lookup_uint32(nvlp, TSWTCL_PEAK_RATE,
	    &cfg_parms->peak_rate)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: invalid config, "\
		    " peak rate missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}

	if (cfg_parms->peak_rate < cfg_parms->committed_rate) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: invalid config, "\
		    " peak rate < committed rate\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (EINVAL);
	}

	/* parse window - in msec */
	if ((rc = nvlist_lookup_uint32(nvlp, TSWTCL_WINDOW,
	    &cfg_parms->window)) != 0) {
		nvlist_free(nvlp);
		tswtcl0dbg(("tswtcl_create_action: invalid config, "\
		    " window missing\n"));
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (rc);
	}
	/* convert to nsec */
	cfg_parms->nsecwindow = (uint64_t)cfg_parms->window *
	    METER_MSEC_TO_NSEC;

	/* parse stats */
	if ((rc = nvlist_lookup_uint32(nvlp, IPP_ACTION_STATS_ENABLE, &bstats))
	    != 0) {
		cfg_parms->stats = B_FALSE;
	} else {
		cfg_parms->stats = (boolean_t)bstats;
	}

	nvlist_free(nvlp);

	/* Initialize other stuff */
	tswtcl_data = kmem_zalloc(TSWTCL_DATA_SZ, KM_NOSLEEP);
	if (tswtcl_data == NULL) {
		kmem_free(cfg_parms, TSWTCL_CFG_SZ);
		return (ENOMEM);
	}

	if (cfg_parms->stats) {
		if ((rc = tswtcl_statinit(aid, tswtcl_data)) != 0) {
			kmem_free(cfg_parms, TSWTCL_CFG_SZ);
			kmem_free(tswtcl_data, TSWTCL_DATA_SZ);
			return (rc);
		}
	}

	/* set action chain reference */
	if ((rc = ipp_action_ref(aid, cfg_parms->red_action, flags)) != 0) {
		tswtcl0dbg(("tswtcl_create_action: ipp_action_ref " \
		    "returned with error %d", rc));
		goto cleanup;
	}
	if ((rc = ipp_action_ref(aid, cfg_parms->yellow_action, flags)) != 0) {
		tswtcl0dbg(("tswtcl_create_action: ipp_action_ref " \
		    "returned with error %d", rc));
		rc2 = ipp_action_unref(aid, cfg_parms->red_action, flags);
		ASSERT(rc2 == 0);
		goto cleanup;
	}
	if ((rc = ipp_action_ref(aid, cfg_parms->green_action, flags)) != 0) {
		tswtcl0dbg(("tswtcl_create_action: ipp_action_ref " \
		    "returned with error %d", rc));
		rc2 = ipp_action_unref(aid, cfg_parms->red_action, flags);
		ASSERT(rc2 == 0);
		rc2 = ipp_action_unref(aid, cfg_parms->yellow_action, flags);
		ASSERT(rc2 == 0);
		goto cleanup;
	}

	/* Initializations */
	cfg_parms->pminusc = cfg_parms->peak_rate - cfg_parms->committed_rate;
	tswtcl_data->cfg_parms = cfg_parms;
	tswtcl_data->avg_rate = cfg_parms->committed_rate;
	mutex_init(&tswtcl_data->tswtcl_lock, NULL, MUTEX_DEFAULT, 0);
	tswtcl_data->win_front = gethrtime();
	ipp_action_set_ptr(aid, (void *)tswtcl_data);

	return (0);

cleanup:
	if (cfg_parms->stats) {
		ipp_stat_destroy(tswtcl_data->stats);
	}
	kmem_free(cfg_parms, TSWTCL_CFG_SZ);
	kmem_free(tswtcl_data, TSWTCL_DATA_SZ);
	return (rc);

}
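/*
 * A sketch of the configuration nvlist a caller might hand to
 * tswtcl_create_action(), using the keys looked up above and the
 * standard Solaris libnvpair calls.  The action names, rates, and
 * window are purely illustrative.
 */
static nvlist_t *
tswtcl_example_config(void)
{
	nvlist_t *nvlp;

	(void) nvlist_alloc(&nvlp, NV_UNIQUE_NAME, KM_SLEEP);
	(void) nvlist_add_string(nvlp, TSWTCL_RED_ACTION_NAME, "drop");
	(void) nvlist_add_string(nvlp, TSWTCL_YELLOW_ACTION_NAME, "mark-af11");
	(void) nvlist_add_string(nvlp, TSWTCL_GREEN_ACTION_NAME, "continue");
	(void) nvlist_add_uint32(nvlp, TSWTCL_COMMITTED_RATE, 1000000);
	(void) nvlist_add_uint32(nvlp, TSWTCL_PEAK_RATE, 2000000);
	(void) nvlist_add_uint32(nvlp, TSWTCL_WINDOW, 100);	/* msec */
	(void) nvlist_add_uint32(nvlp, IPP_ACTION_STATS_ENABLE, 1);
	return (nvlp);
}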
Example #28
0
/**
 * Check if a timer has expired.
 *
 * This function tests if a timer has expired and returns true or
 * false depending on its status.
 *
 * \param t A pointer to the timer
 *
 * \return Non-zero if the timer has expired, zero otherwise.
 *
 */
int
timer_expired(struct spark_timer *t)
{
  return (clock_time_t)(gethrtime() - t->start) >= (clock_time_t)t->interval;
}
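/*
 * Usage sketch: timer_set() is referenced in the comments above but not
 * shown here; assume it records t->interval and sets t->start to the
 * current gethrtime().  The 500000000LL literal is 500 ms expressed in
 * gethrtime() nanoseconds.
 */
extern void timer_set(struct spark_timer *t, clock_time_t interval);

static void
wait_half_second(struct spark_timer *t)
{
  timer_set(t, 500000000LL);
  while (!timer_expired(t))
    ;	/* busy-wait, purely for illustration */
}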
Example #29
0
uint64_t uv__hrtime(uv_clocktype_t type) {
  return gethrtime();
}
Example #30
0
static void purge_single_server(lcb_server_t *server, lcb_error_t error,
                                hrtime_t min_nonstale,
                                hrtime_t *tmo_next)
{
    protocol_binary_request_header req;
    struct lcb_command_data_st ct;
    lcb_size_t nr;
    char *packet;
    lcb_size_t packetsize;
    char *keyptr;
    ringbuffer_t rest;
    ringbuffer_t *stream = &server->cmd_log;
    ringbuffer_t *cookies;
    ringbuffer_t *mirror = NULL; /* mirror buffer should be purged with main stream */
    lcb_connection_t conn = &server->connection;
    lcb_size_t send_size = 0;
    lcb_size_t stream_size = ringbuffer_get_nbytes(stream);
    hrtime_t now = gethrtime();

    if (server->connection_ready) {
        cookies = &server->output_cookies;
    } else {
        cookies = &server->pending_cookies;
        mirror = &server->pending;
    }

    if (conn->output) {
        /* This will usually be false for v1 */
        send_size = ringbuffer_get_nbytes(conn->output);
    }

    lcb_assert(ringbuffer_initialize(&rest, 1024));


    do {
        int allocated = 0;
        lcb_uint32_t headersize;
        lcb_uint16_t nkey;

        nr = ringbuffer_peek(cookies, &ct, sizeof(ct));
        if (nr != sizeof(ct)) {
            break;
        }
        nr = ringbuffer_peek(stream, req.bytes, sizeof(req));
        if (nr != sizeof(req)) {
            break;
        }
        packetsize = (lcb_uint32_t)sizeof(req) + ntohl(req.request.bodylen);
        if (stream->nbytes < packetsize) {
            break;
        }
        if (min_nonstale && ct.start >= min_nonstale) {
            lcb_log(LOGARGS(server, INFO),
                    "Still have %d ms remaining for command",
                    (int)((ct.start - min_nonstale) / 1000000));

            if (tmo_next) {
                *tmo_next = (ct.start - min_nonstale) + 1;
            }
            break;
        }

        lcb_log(LOGARGS(server, INFO),
                "Command with cookie=%p timed out from server %s:%s",
                ct.cookie,
                server->curhost.host,
                server->curhost.port);

        ringbuffer_consumed(cookies, sizeof(ct));

        lcb_assert(nr == sizeof(req));
        packet = stream->read_head;

        if (server->instance->histogram) {
            lcb_record_metrics(server->instance, now - ct.start,
                               req.request.opcode);
        }

        if (server->connection_ready &&
                stream_size > send_size && (stream_size - packetsize) < send_size) {
            /* Copy the rest of the current packet into the
               temporary stream */

            /* I do believe I have some IOV functions to do that? */
            lcb_size_t nbytes = packetsize - (stream_size - send_size);
            lcb_assert(ringbuffer_memcpy(&rest,
                                         conn->output,
                                         nbytes) == 0);
            ringbuffer_consumed(conn->output, nbytes);
            send_size -= nbytes;
        }
        stream_size -= packetsize;
        headersize = (lcb_uint32_t)sizeof(req) + req.request.extlen + ntohs(req.request.keylen);
        if (!ringbuffer_is_continous(stream, RINGBUFFER_READ, headersize)) {
            packet = malloc(headersize);
            if (packet == NULL) {
                lcb_error_handler(server->instance, LCB_CLIENT_ENOMEM, NULL);
                abort();
            }

            nr = ringbuffer_peek(stream, packet, headersize);
            if (nr != headersize) {
                lcb_error_handler(server->instance, LCB_EINTERNAL, NULL);
                free(packet);
                abort();
            }
            allocated = 1;
        }

        keyptr = packet + sizeof(req) + req.request.extlen;
        nkey = ntohs(req.request.keylen);

        failout_single_request(server, &req, &ct, error, keyptr, nkey, packet);

        if (allocated) {
            free(packet);
        }
        ringbuffer_consumed(stream, packetsize);
        if (mirror) {
            ringbuffer_consumed(mirror, packetsize);
        }
    } while (1); /* CONSTCOND */

    if (server->connection_ready && conn->output) {
        /* Preserve the rest of the stream */
        lcb_size_t nbytes = ringbuffer_get_nbytes(stream);
        send_size = ringbuffer_get_nbytes(conn->output);

        if (send_size >= nbytes) {
            ringbuffer_consumed(conn->output, send_size - nbytes);
            lcb_assert(ringbuffer_memcpy(&rest, conn->output, nbytes) == 0);
        }
        ringbuffer_reset(conn->output);
        ringbuffer_append(&rest, conn->output);
    }

    ringbuffer_destruct(&rest);
    lcb_maybe_breakout(server->instance);
}