void AddTimeData(const CNetAddr& ip, int64_t nTime)
{
    int64_t nOffsetSample = nTime - GetTime();

    LOCK(cs_nTimeOffset);
    // Ignore duplicates
    static std::set<CNetAddr> setKnown;
    if (!setKnown.insert(ip).second)
        return;

    // Add data
    static CMedianFilter<int64_t> vTimeOffsets(200,0);
    vTimeOffsets.input(nOffsetSample);
    LogPrintf("Added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);

    // There is a known issue here (see issue #4521):
    //
    // - The structure vTimeOffsets contains up to 200 elements, after which
    // any new element added to it will not increase its size, replacing the
    // oldest element.
    //
    // - The condition to update nTimeOffset includes checking whether the
    // number of elements in vTimeOffsets is odd, which will never happen after
    // there are 200 elements.
    //
    // But in this case the 'bug' is protective against some attacks, and may
    // actually explain why we've never seen attacks which manipulate the
    // clock offset.
    //
    // So we should hold off on fixing this and clean it up as part of
    // a timing cleanup that strengthens it in a number of other ways.
    //
    if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1) {
        int64_t nMedian = vTimeOffsets.median();
        std::vector<int64_t> vSorted = vTimeOffsets.sorted();
        // Only let other nodes change our time by so much
        if (abs64(nMedian) < 70 * 60)
            nTimeOffset = nMedian;
        else {
            nTimeOffset = 0;

            static bool fDone;
            if (!fDone) {
                // If nobody has a time different than ours but within 5 minutes of ours, give a warning
                bool fMatch = false;
                BOOST_FOREACH(int64_t nOffset, vSorted)
                if (nOffset != 0 && abs64(nOffset) < 5 * 60)
                    fMatch = true;

                if (!fMatch) {
                    fDone = true;
                    std::string strMessage = _("Warning: Please check that your computer's date and time are correct! If your clock is wrong Bitcoin Core will not work properly.");
                    strMiscWarning = strMessage;
                    LogPrintf("*** %s\n", strMessage);
                    uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING);
                }
            }
        }
    }
}
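Every snippet on this page calls an abs64() helper rather than std::abs or llabs, presumably because 64-bit overloads of the standard functions were not available or reliable on all the toolchains these projects target. None of the examples show the helper itself; a minimal sketch of how such a helper is typically written (this exact definition is an assumption, not copied from any of the sources) is:

// Sketch of an abs64 helper (assumed definition): absolute value of a
// signed 64-bit integer.
static inline int64_t abs64(int64_t n)
{
    // Note: n == INT64_MIN cannot be represented as a positive int64_t,
    // so the result overflows for that one input.
    return n >= 0 ? n : -n;
}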
Example #2
/*
 * The entries in the table are used in all combinations, with + and - signs
 * preceding them.  The return value is checked to ensure it is in range.
 * On 32-bit systems __divdi3 will be invoked for the 64-bit division.
 * On 64-bit systems the native 64-bit divide will be used, so __divdi3
 * isn't used, but we might as well still run the test.
 */
static int
splat_generic_test_divdi3(struct file *file, void *arg)
{
	const int64_t tabs[] = {
	    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
	    10, 11, 12, 13, 14, 15, 16, 1000, 2003,
	    32765, 32766, 32767, 32768, 32769, 32760,
	    65533, 65534, 65535, 65536, 65537, 65538,
	    0x7ffffffeLL, 0x7fffffffLL, 0x80000000LL, 0x80000001LL,
	    0x7000000000000000LL, 0x7000000080000000LL, 0x7000000080000001LL,
	    0x7fffffffffffffffLL, 0x7fffffff8fffffffLL, 0x7fffffff8ffffff1LL,
	    0x7fffffff00000000LL, 0x7fffffff80000000LL, 0x7fffffff00000001LL,
	    0x0123456789abcdefLL, 0x00000000abcdef01LL, 0x0000000012345678LL,
#if BITS_PER_LONG == 32
	    0x8000000000000000LL, 0x8000000080000000LL, 0x8000000080000001LL,
#endif
	};
	int64_t u, v, q, r;
	int n, i, j, k, errors = 0;

	splat_vprint(file, SPLAT_GENERIC_TEST6_NAME, "%s",
	    "Testing signed 64-bit division.\n");
	n = sizeof(tabs) / sizeof(tabs[0]);
	for (i = 0; i < n; i++) {
		for (j = 1; j < n; j++) {
			for (k = 0; k <= 3; k++) {
				u = (k & 1)  ? -tabs[i] : tabs[i];
				v = (k >= 2) ? -tabs[j] : tabs[j];

				q = u / v; /* __divdi3 */
				r = u - q * v;
				if (abs64(q) >  abs64(u) ||
				    abs64(r) >= abs64(v) ||
				    (r != 0 && (r ^ u) < 0)) {
					splat_vprint(file,
					    SPLAT_GENERIC_TEST6_NAME,
					    "%016llx/%016llx != %016llx "
					    "rem %016llx\n", u, v, q, r);
					errors++;
				}
			}
		}
	}

	if (errors) {
		splat_vprint(file, SPLAT_GENERIC_TEST6_NAME,
		    "Failed %d/%d tests\n", errors, n * (n - 1));
		return -ERANGE;
	}

	splat_vprint(file, SPLAT_GENERIC_TEST6_NAME,
	    "Passed all %d tests\n", n * (n - 1));

	return 0;
}
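The error check above encodes the invariants of truncating signed division: the quotient never exceeds the dividend in magnitude, the remainder is strictly smaller than the divisor in magnitude, and a non-zero remainder takes the sign of the dividend. A minimal userland sketch of the same check (plain asserts instead of splat_vprint; names assumed):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Check the truncating-division invariants for one (u, v) pair.
// Preconditions: v != 0 and neither operand is INT64_MIN (llabs overflows there).
static void check_divide(int64_t u, int64_t v)
{
    int64_t q = u / v;          // __divdi3 on 32-bit targets
    int64_t r = u - q * v;      // equivalent to u % v for truncating division

    assert(std::llabs(q) <= std::llabs(u));   // |q| <= |u|
    assert(std::llabs(r) <  std::llabs(v));   // |r| <  |v|
    assert(r == 0 || (r ^ u) >= 0);           // r carries the sign of u
}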
Example #3
static uint64 GetMicroseconds()
{
	static bool time_init = false;
	if (!time_init) {
		time_init = true;
		Time_Initialize();
	}

	uint64 counter;
	uint64 tick;

	QueryPerformanceCounter((LARGE_INTEGER*) &counter);
	tick = UTGetTickCount64();

	// unfortunately, QueryPerformanceCounter is not guaranteed
	// to be monotonic. Make it so.
	int64 ret = (int64)(((int64)counter - (int64)startPerformanceCounter) / counterPerMicrosecond);
	// if the QPC clock leaps more than one second off GetTickCount64()
	// something is seriously fishy. Adjust QPC to stay monotonic
	int64 tick_diff = tick - startGetTickCount;
	if (abs64(ret / 100000 - tick_diff / 100) > 10) {
		startPerformanceCounter -= (uint64)((int64)(tick_diff * 1000 - ret) * counterPerMicrosecond);
		ret = (int64)((counter - startPerformanceCounter) / counterPerMicrosecond);
	}
	return ret;
}
Example #4
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += ((abs64(delta) - s->stats[index]) >> 2);
}
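The two consecutive stats[] slots updated above hold an exponentially weighted mean of the samples (weight 1/8) and an exponentially weighted mean absolute deviation (weight 1/4). A standalone sketch of the same smoothing in plain C++ (struct and names assumed):

#include <cstdint>
#include <cstdlib>

struct lk_stats {
    int64_t mean = 0;     // smoothed sample value
    int64_t abs_dev = 0;  // smoothed |sample - mean|
};

static void update_stats(lk_stats& s, int64_t sample)
{
    int64_t delta = sample - s.mean;
    // delta >> 3 as in the kernel code; the arithmetic shift rounds toward
    // -infinity for negative deltas, where / 8 would round toward zero.
    s.mean    += delta >> 3;
    s.abs_dev += (std::llabs(delta) - s.abs_dev) >> 2;
}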
Example #5
bool CRenderSystemGLES::PresentRender(const CDirtyRegionList &dirty)
{
  if (!m_bRenderCreated)
    return false;

  if (m_iVSyncMode != 0 && m_iSwapRate != 0) 
  {
    int64_t curr, diff, freq;
    curr = CurrentHostCounter();
    freq = CurrentHostFrequency();

    if(m_iSwapStamp == 0)
      m_iSwapStamp = curr;

    /* calculate our next swap timestamp */
    diff = curr - m_iSwapStamp;
    diff = m_iSwapRate - diff % m_iSwapRate;
    m_iSwapStamp = curr + diff;

    /* sleep as close as we can before, assume 1ms precision of sleep *
     * this should always awake so that we are guaranteed the given   *
     * m_iSwapTime to do our swap                                     */
    diff = (diff - m_iSwapTime) * 1000 / freq;
    if (diff > 0)
      Sleep((DWORD)diff);
  }
  
  bool result = PresentRenderImpl(dirty);
  
  if (m_iVSyncMode && m_iSwapRate != 0)
  {
    int64_t curr, diff;
    curr = CurrentHostCounter();

    diff = curr - m_iSwapStamp;
    m_iSwapStamp = curr;

    if (abs64(diff - m_iSwapRate) < abs64(diff))
      CLog::Log(LOGDEBUG, "%s - missed requested swap",__FUNCTION__);
  }
  
  return result;
}
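In the pre-swap branch above, diff % m_iSwapRate is the time already spent in the current swap interval, so m_iSwapRate minus that value is the time left until the next swap boundary. A small sketch of that arithmetic with illustrative (assumed) numbers:

#include <cstdint>

// Ticks remaining until the next swap boundary; assumes now >= last_stamp and
// swap_rate > 0. With a 1,000,000 tick/s host counter and a 16,667-tick swap
// rate (~60 Hz), 40,000 elapsed ticks leave
// 16,667 - (40,000 % 16,667) = 10,001 ticks until the next boundary.
static int64_t ticks_until_next_swap(int64_t now, int64_t last_stamp, int64_t swap_rate)
{
    int64_t elapsed = now - last_stamp;
    return swap_rate - elapsed % swap_rate;
}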
Example #6
static void kvm_pit_update_clock_offset(KVMPITState *s)
{
    int64_t offset, clock_offset;
    struct timespec ts;
    int i;

    /*
     * Measure the delta between CLOCK_MONOTONIC, the base used for
     * kvm_pit_channel_state::count_load_time, and vm_clock. Take the
     * minimum of several samples to filter out scheduling noise.
     */
    clock_offset = INT64_MAX;
    for (i = 0; i < CALIBRATION_ROUNDS; i++) {
        offset = qemu_get_clock_ns(vm_clock);
        clock_gettime(CLOCK_MONOTONIC, &ts);
        offset -= ts.tv_nsec;
        offset -= (int64_t)ts.tv_sec * 1000000000;
        if (abs64(offset) < abs64(clock_offset)) {
            clock_offset = offset;
        }
    }
    s->kernel_clock_offset = clock_offset;
}
Example #7
/*
 * If the mate pair is "good" (see the definition of good above),
 * the method returns the distance between the two, including the reads
 * themselves.
 *
 * Otherwise, it returns 0.
 */
uint64_t good_mp_dst(mapping_t * fwd_map, mapping_t * rev_map) {

	// testing variables
	uint64_t dist; // distance
	uint64_t cs_fwd; // contigstart/end of fwd read
	uint64_t cs_rev; // contigstart/end of rev read
	char s_fwd; // strand of fwd read
	char s_rev; // strand of reverse read
	
	// booleans
	int are_plus_strands, are_minus_strands, is_small_dist, is_plus_order;
	int is_minus_order;
	
	// get the distance
	if (fwd_map->contigstart < rev_map->contigstart) {
		cs_fwd = fwd_map->contigstart; 
		cs_rev = rev_map->contigend;					
	} else {
		cs_fwd = fwd_map->contigend; 
		cs_rev = rev_map->contigstart;
	}
	dist = abs64(cs_fwd - cs_rev);
	is_small_dist = (dist < distcutoff);
	
	// get the strands
	s_fwd = fwd_map->strand;
	s_rev = rev_map->strand;
	are_plus_strands = ((s_fwd == s_rev) && (s_fwd == '+'));
	are_minus_strands = ((s_fwd == s_rev) && (s_fwd == '-'));
		
	// get the order
	is_plus_order = (are_plus_strands && (cs_rev < cs_fwd));
	is_minus_order = (are_minus_strands && (cs_fwd < cs_rev));
	
	// return distance if this is a good mapping
	if ((is_small_dist) &&  (is_plus_order || is_minus_order)) {
		return dist;
	} else {
		return 0;
	}
}
Example #8
int64 AudioFormatReader::searchForLevel (int64 startSample,
                                         int64 numSamplesToSearch,
                                         const double magnitudeRangeMinimum,
                                         const double magnitudeRangeMaximum,
                                         const int minimumConsecutiveSamples)
{
    if (numSamplesToSearch == 0)
        return -1;

    const int bufferSize = 4096;
    HeapBlock<int> tempSpace (bufferSize * 2 + 64);

    int* tempBuffer[3];
    tempBuffer[0] = tempSpace.getData();
    tempBuffer[1] = tempSpace.getData() + bufferSize;
    tempBuffer[2] = 0;

    int consecutive = 0;
    int64 firstMatchPos = -1;

    jassert (magnitudeRangeMaximum > magnitudeRangeMinimum);

    const double doubleMin = jlimit (0.0, (double) std::numeric_limits<int>::max(), magnitudeRangeMinimum * std::numeric_limits<int>::max());
    const double doubleMax = jlimit (doubleMin, (double) std::numeric_limits<int>::max(), magnitudeRangeMaximum * std::numeric_limits<int>::max());
    const int intMagnitudeRangeMinimum = roundToInt (doubleMin);
    const int intMagnitudeRangeMaximum = roundToInt (doubleMax);

    while (numSamplesToSearch != 0)
    {
        const int numThisTime = (int) jmin (abs64 (numSamplesToSearch), (int64) bufferSize);
        int64 bufferStart = startSample;

        if (numSamplesToSearch < 0)
            bufferStart -= numThisTime;

        if (bufferStart >= (int) lengthInSamples)
            break;

        read (tempBuffer, 2, bufferStart, numThisTime, false);

        int num = numThisTime;
        while (--num >= 0)
        {
            if (numSamplesToSearch < 0)
                --startSample;

            bool matches = false;
            const int index = (int) (startSample - bufferStart);

            if (usesFloatingPointData)
            {
                const float sample1 = std::abs (((float*) tempBuffer[0]) [index]);

                if (sample1 >= magnitudeRangeMinimum
                     && sample1 <= magnitudeRangeMaximum)
                {
                    matches = true;
                }
                else if (numChannels > 1)
                {
                    const float sample2 = std::abs (((float*) tempBuffer[1]) [index]);

                    matches = (sample2 >= magnitudeRangeMinimum
                                 && sample2 <= magnitudeRangeMaximum);
                }
            }
            else
            {
                const int sample1 = abs (tempBuffer[0] [index]);

                if (sample1 >= intMagnitudeRangeMinimum
                     && sample1 <= intMagnitudeRangeMaximum)
                {
                    matches = true;
                }
                else if (numChannels > 1)
                {
                    const int sample2 = abs (tempBuffer[1][index]);

                    matches = (sample2 >= intMagnitudeRangeMinimum
                                 && sample2 <= intMagnitudeRangeMaximum);
                }
            }

            if (matches)
            {
                if (firstMatchPos < 0)
                    firstMatchPos = startSample;

                if (++consecutive >= minimumConsecutiveSamples)
                {
                    if (firstMatchPos < 0 || firstMatchPos >= lengthInSamples)
                        return -1;

                    return firstMatchPos;
                }
            }
            else
            {
                consecutive = 0;
                firstMatchPos = -1;
            }

            if (numSamplesToSearch > 0)
                ++startSample;
        }

        if (numSamplesToSearch > 0)
            numSamplesToSearch -= numThisTime;
        else
            numSamplesToSearch += numThisTime;
    }

    return -1;
}
Example #9
uint64
int64_gcd (int64 a, int64 b)
{
  return uint64_gcd ((uint64)abs64 (a),
		     (uint64)abs64 (b));
}
Example #10
uint64 int64_lcm (int64 a, int64 b)
{
  return uint64_lcm ((uint64)abs64 (a),
		     (uint64)abs64 (b));
}
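Both of these examples take abs64 of the signed inputs and delegate to unsigned uint64_gcd/uint64_lcm helpers that are not shown on this page. A minimal Euclidean sketch of what such helpers presumably look like (assumed, not copied from the source):

#include <cstdint>

static uint64_t uint64_gcd(uint64_t a, uint64_t b)
{
    // Euclid's algorithm; gcd(a, 0) == a by convention.
    while (b != 0) {
        uint64_t t = a % b;
        a = b;
        b = t;
    }
    return a;
}

static uint64_t uint64_lcm(uint64_t a, uint64_t b)
{
    if (a == 0 || b == 0)
        return 0;
    // Divide before multiplying to reduce the chance of overflow.
    return (a / uint64_gcd(a, b)) * b;
}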
Example #11
void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
{
    LOCK(cs_nTimeOffset);
    // Ignore duplicates
    static std::set<CNetAddr> setKnown;
    if (setKnown.size() == DIGIBYTE_TIMEDATA_MAX_SAMPLES)
        return;
    if (!setKnown.insert(ip).second)
        return;

    // Add data
    static CMedianFilter<int64_t> vTimeOffsets(DIGIBYTE_TIMEDATA_MAX_SAMPLES, 0);
    vTimeOffsets.input(nOffsetSample);
    LogPrint(BCLog::NET,"added time data, samples %d, offset %+d (%+d minutes)\n", vTimeOffsets.size(), nOffsetSample, nOffsetSample/60);

    // There is a known issue here (see issue #4521):
    //
    // - The structure vTimeOffsets contains up to 200 elements, after which
    // any new element added to it will not increase its size, replacing the
    // oldest element.
    //
    // - The condition to update nTimeOffset includes checking whether the
    // number of elements in vTimeOffsets is odd, which will never happen after
    // there are 200 elements.
    //
    // But in this case the 'bug' is protective against some attacks, and may
    // actually explain why we've never seen attacks which manipulate the
    // clock offset.
    //
    // So we should hold off on fixing this and clean it up as part of
    // a timing cleanup that strengthens it in a number of other ways.
    //
    if (vTimeOffsets.size() >= 5 && vTimeOffsets.size() % 2 == 1)
    {
        int64_t nMedian = vTimeOffsets.median();
        std::vector<int64_t> vSorted = vTimeOffsets.sorted();
        // Only let other nodes change our time by so much
        if (abs64(nMedian) <= std::max<int64_t>(0, gArgs.GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT)))
        {
            nTimeOffset = nMedian;
        }
        else
        {
            nTimeOffset = 0;

            static bool fDone;
            if (!fDone)
            {
                // If nobody has a time different than ours but within 5 minutes of ours, give a warning
                bool fMatch = false;
                for (int64_t nOffset : vSorted)
                    if (nOffset != 0 && abs64(nOffset) < 5 * 60)
                        fMatch = true;

                if (!fMatch)
                {
                    fDone = true;
                    std::string strMessage = strprintf(_("Please check that your computer's date and time are correct! If your clock is wrong, %s will not work properly."), _(PACKAGE_NAME));
                    SetMiscWarning(strMessage);
                    uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_WARNING);
                }
            }
        }

        if (LogAcceptCategory(BCLog::NET)) {
            for (int64_t n : vSorted) {
                LogPrint(BCLog::NET, "%+d  ", n); /* Continued */
            }
            LogPrint(BCLog::NET, "|  "); /* Continued */

            LogPrint(BCLog::NET, "nTimeOffset = %+d  (%+d minutes)\n", nTimeOffset, nTimeOffset/60);
        }
    }
}
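The issue #4521 comment hinges on how CMedianFilter behaves once it is full: with a capacity of 200 the size stops growing at an even number, so the size() % 2 == 1 condition can never hold again and nTimeOffset stops updating. A simplified sketch of such a bounded median filter (the real Bitcoin/DigiByte class differs in details, e.g. it keeps a pre-sorted copy) makes that behaviour easy to see:

#include <algorithm>
#include <cstdint>
#include <vector>

class MedianFilter {
public:
    MedianFilter(size_t max_size, int64_t initial) : max_size_(max_size) {
        values_.push_back(initial);
    }
    void input(int64_t v) {
        if (values_.size() == max_size_)
            values_.erase(values_.begin());   // drop the oldest sample
        values_.push_back(v);                 // size never exceeds max_size_
    }
    int64_t median() const {
        std::vector<int64_t> sorted(values_);
        std::sort(sorted.begin(), sorted.end());
        size_t n = sorted.size();
        return n % 2 ? sorted[n / 2]
                     : (sorted[n / 2 - 1] + sorted[n / 2]) / 2;
    }
    size_t size() const { return values_.size(); }

private:
    size_t max_size_;
    std::vector<int64_t> values_;
};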
Example #12
CStepMatrixColumn::CStepMatrixColumn(const CZeroSet & set,
                                     CStepMatrixColumn const * pPositive,
                                     CStepMatrixColumn const * pNegative):
    mZeroSet(set),
    mReaction(),
    mIterator(NULL)
{
  C_INT64 PosMult = -pNegative->getMultiplier();
  C_INT64 NegMult = pPositive->getMultiplier();

  C_INT64 GCD1 = abs64(PosMult);
  C_INT64 GCD2 = abs64(NegMult);

  // Divide PosMult and NegMult by GCD(PosMult, NegMult);
  CBitPatternTreeMethod::GCD(GCD1, GCD2);

  if (GCD1 != 1)
    {
      PosMult /= GCD1;
      NegMult /= GCD1;
    }

  // -1 is used to identify the start of the GCD search.
  GCD1 = -1;

  mReaction.resize(pPositive->mReaction.size());
  std::vector< C_INT64 >::iterator it = mReaction.begin();
  std::vector< C_INT64 >::iterator end = mReaction.end();

  std::vector< C_INT64 >::const_iterator itPos = pPositive->mReaction.begin();
  std::vector< C_INT64 >::const_iterator itNeg = pNegative->mReaction.begin();

  for (; it != end; ++it, ++itPos, ++itNeg)
    {
      // TODO We need to check that we do not have numerical overflow
      *it = PosMult * *itPos + NegMult * *itNeg;

      if (*it == 0 || GCD1 == 1)
        {
          continue;
        }

      if (GCD1 == -1)
        {
          GCD1 = abs64(*it);
          continue;
        }

      GCD2 = abs64(*it);

      CBitPatternTreeMethod::GCD(GCD1, GCD2);
    }

  if (GCD1 > 1)
    {
      for (it = mReaction.begin(); it != end; ++it)
        {
          *it /= GCD1;
        }
    }
}
Example #13
/*
 * Compute the probabilities, given a mapping. Add it to the mate pair array
 * if the probabilities pass the thresholds.
 */
inline int add_p_stats(mapping_t * fwd_map, mapping_t * rev_map, 
		mate_pair_val *mp_set, double * totnormodds, int * mp_set_index ) {
	
	// skip the pair if mappings on different contigs are not allowed and the contig names don't match
	if (!allow_diff_chr && 
			(strcmp(fwd_map->contigname, rev_map->contigname) != 0))
		return 0;
	
	
	
	// contig start and end
	uint64_t cs_fwd; // contigstart/end of fwd read
	uint64_t cs_rev; // contigstart/end of rev read
	
	// get the distance
	if (fwd_map->contigstart < rev_map->contigstart) {
		cs_fwd = fwd_map->contigstart; 
		cs_rev = rev_map->contigend;					
	} else {
		cs_fwd = fwd_map->contigend; 
		cs_rev = rev_map->contigstart;
	}
	uint64_t dist = abs64(cs_fwd - cs_rev);
	
	
	
	// pgenome
	double pgenome_fwd = fwd_map->pgenome;
	double pgenome_rev = rev_map->pgenome;
	int pgenome_bin = 0;
	double pgenome_cumsum = 0;
	double pgenome = 0;
		
	if (discordant) {
		pgenome = pgenome_fwd * pgenome_rev;
	} else {
		pgenome_bin = (int) floor((fabs((double)(dist) - gl_mean) 
				* 1.0 / hist_distcutoff) * HIST_BINS);
	
		if (pgenome_bin >= HIST_BINS)
			pgenome_cumsum = 0;
		else
			pgenome_cumsum = gl_hist_cumsum[pgenome_bin];
	
		pgenome = pgenome_fwd * pgenome_rev * pgenome_cumsum;
	}
	
	pgenome = MIN(ALMOST_ONE, pgenome);
	
	if (pgenome < pgenome_cutoff)
		return 0;

	//fprintf(stderr, "pgenome1: %f, pgenome2: %f, pgenome: %f, pgenome_bin: %i,,small_fabs:%f pgenome_cumsum:%f\n", 
		//		pgenome_fwd, pgenome_rev, pgenome, pgenome_bin, fabs((double)(dist) - gl_mean), pgenome_cumsum);

	
	// pchance
	double pchance;
		
	// pchance
	double pchance_fwd = fwd_map->pchance;
	double pchance_rev = rev_map->pchance;
	
	if (discordant || quickmode) {
		pchance = pchance_fwd * pchance_rev;
	} else { 
		double pchance_fwd_alt = 1 - pow(1 - pchance_fwd, 
				fabs((double)(dist) - gl_mean + 1) * 1.0 / (double)genome_length);
		double pchance_rev_alt = 1 - pow(1 - pchance_rev, 
				fabs((double)(dist) - gl_mean + 1) * 1.0 / (double)genome_length);
		pchance = (pchance_fwd * pchance_rev_alt + 
				pchance_rev * pchance_fwd_alt) / 2;
	}
	pchance = MAX(ALMOST_ZERO, pchance);
	
	if (pchance > pchance_cutoff)
		return 0;
	

	
	
	// assigning the values
	mp_set[*mp_set_index].fwd_rs = fwd_map;
	mp_set[*mp_set_index].rev_rs = rev_map;
	mp_set[*mp_set_index].pchance = pchance;
	mp_set[*mp_set_index].pgenome = pgenome;
	mp_set[*mp_set_index].normodds = pgenome/pchance;
	mp_set[*mp_set_index].dist = dist;
	mp_set_index[0]++;
	
	totnormodds[0] += pgenome/pchance;
	
	return 1;
}
Example #14
// static
bool CBitPatternTreeMethod::CalculateKernel(CMatrix< C_INT64 > & matrix,
    CMatrix< C_INT64 > & kernel,
    CVector< size_t > & rowPivot)
{
  // std::cout << matrix << std::endl;

  // Gaussian elimination
  size_t NumRows = matrix.numRows();
  size_t NumCols = matrix.numCols();

  assert(NumRows > 0);
  assert(NumCols > 0);

  // Initialize row pivots
  rowPivot.resize(NumRows);
  size_t * pPivot = rowPivot.array();

  for (size_t i = 0; i < NumRows; i++, pPivot++)
    {
      *pPivot = i;
    }

  CVector< size_t > RowPivot(rowPivot);

  CVector< C_INT64 > Identity(NumRows);
  Identity = 1;

  C_INT64 * pColumn = matrix.array();
  C_INT64 * pColumnEnd = pColumn + NumCols;

  C_INT64 * pActiveRow;
  C_INT64 * pActiveRowStart = pColumn;
  C_INT64 * pActiveRowEnd = pColumnEnd;

  C_INT64 * pRow;
  C_INT64 * pRowEnd = pColumn + matrix.size();

  C_INT64 * pCurrent;
  C_INT64 * pIdentity;

  CVector< C_INT64 > SwapTmp(NumCols);

  size_t CurrentRowIndex = 0;
  size_t CurrentColumnIndex = 0;
  size_t NonZeroIndex = 0;

  CVector< bool > IgnoredColumns(NumCols);
  IgnoredColumns = false;
  bool * pIgnoredColumn = IgnoredColumns.array();
  size_t IgnoredColumnCount = 0;

  // For each column
  for (; pColumn < pColumnEnd; ++pColumn, ++CurrentColumnIndex, ++pIgnoredColumn)
    {
      assert(CurrentColumnIndex == CurrentRowIndex + IgnoredColumnCount);

      // Find non zero entry in current column.
      pRow = pActiveRowStart + CurrentColumnIndex;
      NonZeroIndex = CurrentRowIndex;

      for (; pRow < pRowEnd; pRow += NumCols, ++NonZeroIndex)
        {
          if (*pRow != 0)
            break;
        }

      if (NonZeroIndex >= NumRows)
        {
          *pIgnoredColumn = true;
          IgnoredColumnCount++;

          continue;
        }

      if (NonZeroIndex != CurrentRowIndex)
        {
          // Swap rows
          memcpy(SwapTmp.array(), matrix[CurrentRowIndex], NumCols * sizeof(C_INT64));
          memcpy(matrix[CurrentRowIndex], matrix[NonZeroIndex], NumCols * sizeof(C_INT64));
          memcpy(matrix[NonZeroIndex], SwapTmp.array(), NumCols * sizeof(C_INT64));

          // Record pivot
          size_t tmp = RowPivot[CurrentRowIndex];
          RowPivot[CurrentRowIndex] = RowPivot[NonZeroIndex];
          RowPivot[NonZeroIndex] = tmp;

          C_INT64 TMP = Identity[CurrentRowIndex];
          Identity[CurrentRowIndex]  = Identity[NonZeroIndex];
          Identity[NonZeroIndex] = TMP;
        }

      if (*(pActiveRowStart + CurrentColumnIndex) < 0)
        {
          for (pRow = pActiveRowStart; pRow < pActiveRowEnd; ++pRow)
            {
              *pRow *= -1;
            }

          Identity[CurrentRowIndex] *= -1;
        }

      // For each row
      pRow = pActiveRowStart + NumCols;
      pIdentity = Identity.array() + CurrentRowIndex + 1;

      C_INT64 ActiveRowValue = *(pActiveRowStart + CurrentColumnIndex);
      *(pActiveRowStart + CurrentColumnIndex) = Identity[CurrentRowIndex];

      for (; pRow < pRowEnd; pRow += NumCols, ++pIdentity)
        {
          C_INT64 RowValue = *(pRow + CurrentColumnIndex);

          if (RowValue == 0)
            continue;

          *(pRow + CurrentColumnIndex) = 0;

          // compute GCD(*pActiveRowStart, *pRow)
          C_INT64 GCD1 = abs64(ActiveRowValue);
          C_INT64 GCD2 = abs64(RowValue);

          GCD(GCD1, GCD2);

          C_INT64 alpha = ActiveRowValue / GCD1;
          C_INT64 beta = RowValue / GCD1;

          // update rest of row
          pActiveRow = pActiveRowStart;
          pCurrent = pRow;
          *pIdentity *= alpha;

          GCD1 = abs64(*pIdentity);

          for (; pActiveRow < pActiveRowEnd; ++pActiveRow, ++pCurrent)
            {
              // Assert that we do not have a numerical overflow.
              assert(fabs(((C_FLOAT64) alpha) *((C_FLOAT64) * pCurrent) - ((C_FLOAT64) beta) *((C_FLOAT64) * pActiveRow)) < std::numeric_limits< C_INT64 >::max());

              *pCurrent = alpha * *pCurrent - beta * *pActiveRow;

              // We check that the row values do not have any common divisor.
              if (GCD1 > 1 &&
                  (GCD2 = abs64(*pCurrent)) > 0)
                {
                  GCD(GCD1, GCD2);
                }
            }

          if (GCD1 > 1)
            {
              *pIdentity /= GCD1;

              pActiveRow = pActiveRowStart;
              pCurrent = pRow;

              for (; pActiveRow < pActiveRowEnd; ++pActiveRow, ++pCurrent)
                {
                  *pCurrent /= GCD1;
                }
            }
        }

      pActiveRowStart += NumCols;
      pActiveRowEnd += NumCols;
      CurrentRowIndex++;
    }

  assert(CurrentColumnIndex == CurrentRowIndex + IgnoredColumnCount);
  assert(CurrentColumnIndex == NumCols);

  // std::cout << matrix << std::endl;
  // std::cout << IgnoredColumns << std::endl;
  // std::cout << Identity << std::endl;
  // std::cout << RowPivot << std::endl << std::endl;

  size_t KernelRows = NumRows;
  size_t KernelCols = NumRows + IgnoredColumnCount - NumCols;

  assert(KernelCols > 0);

  kernel.resize(KernelRows, KernelCols);

  CMatrix< C_INT64 > Kernel(KernelRows, KernelCols);
  Kernel = 0;

  C_INT64 * pKernelInt = Kernel.array();
  C_INT64 * pKernelIntEnd = pKernelInt + Kernel.size();

  pActiveRowStart = matrix[CurrentRowIndex];
  pActiveRowEnd = matrix[NumRows];

  // Null space matrix identity part
  pIdentity = Identity.array() + CurrentRowIndex;
  C_INT64 * pKernelColumn = Kernel.array();

  for (; pActiveRowStart < pActiveRowEnd; pActiveRowStart += NumCols, ++pKernelColumn, ++pIdentity)
    {
      pKernelInt = pKernelColumn;
      pIgnoredColumn = IgnoredColumns.array();

      pRow = pActiveRowStart;
      pRowEnd = pRow + NumCols;

      if (*pIdentity < 0)
        {
          *pIdentity *= -1;

          for (; pRow < pRowEnd; ++pRow, ++pIgnoredColumn)
            {
              if (*pIgnoredColumn)
                {
                  continue;
                }

              *pKernelInt = - *pRow;
              pKernelInt += KernelCols;
            }
        }
      else
        {
          for (; pRow < pRowEnd; ++pRow, ++pIgnoredColumn)
            {
              if (*pIgnoredColumn)
                {
                  continue;
                }

              *pKernelInt = *pRow;
              pKernelInt += KernelCols;
            }
        }
    }

  pIdentity = Identity.array() + CurrentRowIndex;
  pKernelInt = Kernel[CurrentRowIndex];

  for (; pKernelInt < pKernelIntEnd; pKernelInt += KernelCols + 1, ++pIdentity)
    {
      *pKernelInt = *pIdentity;
    }

  // std::cout << Kernel << std::endl;
  // std::cout << RowPivot << std::endl << std::endl;

  // Undo the reordering introduced by Gaussian elimination to the kernel matrix.
  pPivot = RowPivot.array();
  pRow = Kernel.array();
  pRowEnd = pRow + Kernel.size();

  for (; pRow < pRowEnd; ++pPivot, pRow += KernelCols)
    {
      memcpy(kernel[*pPivot], pRow, KernelCols * sizeof(C_INT64));
    }

  // std::cout << kernel << std::endl << std::endl;
  // std::cout << rowPivot << std::endl << std::endl;

  return true;
}
Example #15
int64_t DoReq(SOCKET sockfd, socklen_t servlen, struct sockaddr cliaddr) {


#ifdef WIN32
    u_long nOne = 1;
    if (ioctlsocket(sockfd, FIONBIO, &nOne) == SOCKET_ERROR) {
        printf("ConnectSocket() : ioctlsocket non-blocking setting failed, error %d\n", WSAGetLastError());
#else
    if (fcntl(sockfd, F_SETFL, O_NONBLOCK) == SOCKET_ERROR) {
        printf("ConnectSocket() : fcntl non-blocking setting failed, error %d\n", errno);
#endif
        return -2;
    }

    struct timeval timeout = {10, 0};
    struct pkt *msg = new pkt;
    struct pkt *prt  = new pkt;
    time_t seconds_transmit;
    int len = 48;

    msg->li_vn_mode=227;
    msg->stratum=0;
    msg->ppoll=4;
    msg->precision=0;
    msg->rootdelay=0;
    msg->rootdispersion=0;

    msg->ref.Ul_i.Xl_i=0;
    msg->ref.Ul_f.Xl_f=0;
    msg->org.Ul_i.Xl_i=0;
    msg->org.Ul_f.Xl_f=0;
    msg->rec.Ul_i.Xl_i=0;
    msg->rec.Ul_f.Xl_f=0;
    msg->xmt.Ul_i.Xl_i=0;
    msg->xmt.Ul_f.Xl_f=0;

    int retcode = sendto(sockfd, (char *) msg, len, 0, &cliaddr, servlen);
    if (retcode < 0) {
        printf("sendto() failed: %d\n", retcode);
        seconds_transmit = -3;
        goto _end;
    }

    fd_set fdset;
    FD_ZERO(&fdset);
    FD_SET(sockfd, &fdset);

    retcode = select(sockfd + 1, &fdset, NULL, NULL, &timeout);
    if (retcode <= 0) {
        printf("recvfrom() error\n");
        seconds_transmit = -4;
        goto _end;
    }

    recvfrom(sockfd, (char *) msg, len, 0, NULL, NULL);
    ntohl_fp(&msg->xmt, &prt->xmt);
    Ntp2Unix(prt->xmt.Ul_i.Xl_ui, seconds_transmit);

    _end:

    delete msg;
    delete prt;

    return seconds_transmit;
}

int64_t NtpGetTime(CNetAddr& ip) {
    struct sockaddr cliaddr;

    SOCKET sockfd;
    socklen_t servlen;

    if (!InitWithRandom(sockfd, servlen, &cliaddr))
        return -1;

    ip = CNetAddr(((sockaddr_in *)&cliaddr)->sin_addr);
    int64_t nTime = DoReq(sockfd, servlen, cliaddr);

    closesocket(sockfd);

    return nTime;
}

int64_t NtpGetTime(const std::string &strHostName)
{
    struct sockaddr cliaddr;

    SOCKET sockfd;
    socklen_t servlen;

    if (!InitWithHost(strHostName, sockfd, servlen, &cliaddr))
        return -1;

    int64_t nTime = DoReq(sockfd, servlen, cliaddr);

    closesocket(sockfd);

    return nTime;
}

// NTP server, which we unconditionally trust. This may be your own installation of ntpd somewhere, for example. 
// "localhost" means "trust no one"
std::string strTrustedUpstream = "localhost";

// Current offset
int64_t nNtpOffset = INT64_MAX;

int64_t GetNtpOffset() {
    return nNtpOffset;
}

void ThreadNtpSamples(void* parg) {
    const int64_t nMaxOffset = 86400; // Not a real limit, just sanity threshold.

    printf("Trying to find NTP server at localhost...\n");

    std::string strLocalHost = "127.0.0.1";
    if (NtpGetTime(strLocalHost) == GetTime()) {
        printf("There is NTP server active at localhost,  we don't need NTP thread.\n");

        nNtpOffset = 0;
        return;
    }

    printf("ThreadNtpSamples started\n");
    vnThreadsRunning[THREAD_NTP]++;

    // Make this thread recognisable as time synchronization thread
    RenameThread("Chipcoin-ntp-samples");

    CMedianFilter<int64_t> vTimeOffsets(200,0);

    while (!fShutdown) {
        if (strTrustedUpstream != "localhost") {
            // Trying to get new offset sample from trusted NTP server.
            int64_t nClockOffset = NtpGetTime(strTrustedUpstream) - GetTime();

            if (abs64(nClockOffset) < nMaxOffset) {
                // Everything seems right, remember new trusted offset.
                printf("ThreadNtpSamples: new offset sample from %s, offset=%" PRId64 ".\n", strTrustedUpstream.c_str(), nClockOffset);
                nNtpOffset = nClockOffset;
            }
            else {
                // Something went wrong, disable trusted offset sampling.
                nNtpOffset = INT64_MAX;
                strTrustedUpstream = "localhost";

                int nSleepMinutes = 1 + GetRandInt(9); // Sleep for 1-10 minutes.
                for (int i = 0; i < nSleepMinutes * 60 && !fShutdown; i++)
                    MilliSleep(1000);

                continue;
            }
        }
        else {
            // Now, trying to get 2-4 samples from random NTP servers.
            int nSamplesCount = 2 + GetRandInt(2);

            for (int i = 0; i < nSamplesCount; i++) {
                CNetAddr ip;
                int64_t nClockOffset = NtpGetTime(ip) - GetTime();

                if (abs64(nClockOffset) < nMaxOffset) { // Skip the deliberately wrong timestamps
                    printf("ThreadNtpSamples: new offset sample from %s, offset=%" PRId64 ".\n", ip.ToString().c_str(), nClockOffset);
                    vTimeOffsets.input(nClockOffset);
                }
            }

            if (vTimeOffsets.size() > 1) {
                nNtpOffset = vTimeOffsets.median();
            }
            else {
                // Not enough offsets yet, try to collect additional samples later.
                nNtpOffset = INT64_MAX;
                int nSleepMinutes = 1 + GetRandInt(4); // Sleep for 1-5 minutes.
                for (int i = 0; i < nSleepMinutes * 60 && !fShutdown; i++) 
                    MilliSleep(1000);
                continue;
            }
        }

        if (GetNodesOffset() == INT_MAX && abs64(nNtpOffset) > 40 * 60)
        {
            // If there is not enough node offset data and the NTP time offset is greater than 40 minutes, give a warning.
            std::string strMessage = _("Warning: Please check that your computer's date and time are correct! If your clock is wrong Chipcoin will not work properly.");
            strMiscWarning = strMessage;
            printf("*** %s\n", strMessage.c_str());
            uiInterface.ThreadSafeMessageBox(strMessage+" ", std::string("Chipcoin"), CClientUIInterface::OK | CClientUIInterface::ICON_EXCLAMATION);
        }

        printf("nNtpOffset = %+" PRId64 "  (%+" PRId64 " minutes)\n", nNtpOffset, nNtpOffset/60);

        int nSleepHours = 1 + GetRandInt(5); // Sleep for 1-6 hours.
        for (int i = 0; i < nSleepHours * 3600 && !fShutdown; i++)
            MilliSleep(1000);
    }

    vnThreadsRunning[THREAD_NTP]--;
    printf("ThreadNtpSamples exited\n");
}
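DoReq above relies on an Ntp2Unix helper that is not shown. NTP timestamps count seconds from 1900-01-01 while Unix time counts from 1970-01-01, a fixed difference of 2,208,988,800 seconds, so a minimal sketch of the conversion (signature assumed) is:

#include <cstdint>
#include <ctime>

// Convert the integer part of an NTP timestamp to Unix time.
static void Ntp2Unix(uint32_t ntp_seconds, time_t& unix_seconds)
{
    const uint32_t kNtpUnixEpochDelta = 2208988800u;  // 1900-01-01 to 1970-01-01
    unix_seconds = (time_t)(ntp_seconds - kNtpUnixEpochDelta);
}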
Example #16
/*
 * Disable vblank irq's on crtc, make sure that last vblank count
 * of hardware and corresponding consistent software vblank counter
 * are preserved, even if there are any spurious vblank irq's after
 * disable.
 */
static void vblank_disable_and_save(struct drm_device *dev, int crtc)
{
	u32 vblcount;
	s64 diff_ns;
	int vblrc;
	struct timeval tvblank;
	int count = DRM_TIMESTAMP_MAXRETRIES;

	/* Prevent vblank irq processing while disabling vblank irqs,
	 * so no updates of timestamps or count can happen after we've
	 * disabled. Needed to prevent races in case of delayed irq's.
	 */
	mtx_enter(&dev->vblank_time_lock);

	dev->driver->disable_vblank(dev, crtc);
	dev->vblank_enabled[crtc] = 0;

	/* No further vblank irq's will be processed after
	 * this point. Get current hardware vblank count and
	 * vblank timestamp, repeat until they are consistent.
	 *
	 * FIXME: There is still a race condition here and in
	 * drm_update_vblank_count() which can cause off-by-one
	 * reinitialization of software vblank counter. If gpu
	 * vblank counter doesn't increment exactly at the leading
	 * edge of a vblank interval, then we can lose 1 count if
	 * we happen to execute between start of vblank and the
	 * delayed gpu counter increment.
	 */
	do {
		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);

	if (!count)
		vblrc = 0;

	/* Compute time difference to stored timestamp of last vblank
	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
	 */
	vblcount = atomic_read(&dev->_vblank_count[crtc]);
	diff_ns = timeval_to_ns(&tvblank) -
		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));

	/* If there is at least 1 msec difference between the last stored
	 * timestamp and tvblank, then we are currently executing our
	 * disable inside a new vblank interval, the tvblank timestamp
	 * corresponds to this new vblank interval and the irq handler
	 * for this vblank didn't run yet and won't run due to our disable.
	 * Therefore we need to do the job of drm_handle_vblank() and
	 * increment the vblank counter by one to account for this vblank.
	 *
	 * Skip this step if there isn't any high precision timestamp
	 * available. In that case we can't account for this and just
	 * hope for the best.
	 */
	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
		atomic_inc(&dev->_vblank_count[crtc]);
		smp_mb__after_atomic_inc();
	}

	/* Invalidate all timestamps while vblank irq's are off. */
	clear_vblank_timestamps(dev, crtc);

	mtx_leave(&dev->vblank_time_lock);
}
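The 1 ms threshold above is applied to a difference of timeval_to_ns() values; that helper just converts a struct timeval to nanoseconds. A userland sketch of the equivalent conversion:

#include <cstdint>
#include <sys/time.h>

static inline int64_t timeval_to_ns(const struct timeval* tv)
{
    // Seconds and microseconds, both widened before the multiply.
    return (int64_t)tv->tv_sec * 1000000000LL + (int64_t)tv->tv_usec * 1000LL;
}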