TEST_F(PrintContextTest, LinkTargetSvg)
{
    MockCanvas canvas;
    setBodyInnerHTML("<svg width='100' height='100'>"
        "<a xlink:href='http://www.w3.org'><rect x='20' y='20' width='50' height='50'/></a>"
        "<text x='10' y='90'><a xlink:href='http://www.google.com'><tspan>google</tspan></a></text>"
        "</svg>");
    printSinglePage(canvas);

    const Vector<MockCanvas::Operation>& operations = canvas.recordedOperations();
    ASSERT_EQ(2u, operations.size());
    EXPECT_EQ(MockCanvas::DrawRect, operations[0].type);
    EXPECT_SKRECT_EQ(20, 20, 50, 50, operations[0].rect);
    EXPECT_EQ(MockCanvas::DrawRect, operations[1].type);
    EXPECT_EQ(10, operations[1].rect.x());
    EXPECT_GE(90, operations[1].rect.y());
}
TEST_F(DlExtTest, ReservedHint) {
  void* start = mmap(nullptr, LIBSIZE, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
                     -1, 0);
  ASSERT_TRUE(start != MAP_FAILED);
  android_dlextinfo extinfo;
  extinfo.flags = ANDROID_DLEXT_RESERVED_ADDRESS_HINT;
  extinfo.reserved_addr = start;
  extinfo.reserved_size = LIBSIZE;
  handle_ = android_dlopen_ext(LIBNAME, RTLD_NOW, &extinfo);
  ASSERT_DL_NOTNULL(handle_);
  fn f = reinterpret_cast<fn>(dlsym(handle_, "getRandomNumber"));
  ASSERT_DL_NOTNULL(f);
  EXPECT_GE(reinterpret_cast<void*>(f), start);
  EXPECT_LT(reinterpret_cast<void*>(f),
            reinterpret_cast<char*>(start) + LIBSIZE);
  EXPECT_EQ(4, f());
}
TEST_F(StackTest, stressInsertion)
{
	using us = std::chrono::microseconds;
	using clock = std::chrono::high_resolution_clock;

	const int qty = 1 << 20;
	stack<int, qty> t;

	auto start = clock::now();
	for (auto i = 0; i < qty; ++i) {
		t.push(i);
		ASSERT_EQ(i, t.top());
	}
	auto duration = std::chrono::duration_cast<us>(clock::now() - start);

	EXPECT_GE(100000, duration.count());
}
 ErrorStack run(thread::Thread* context) {
   start_rendezvous.wait();
   assorted::UniformRandom rand;
   rand.set_current_seed(client_id_);
   Epoch highest_commit_epoch;
   xct::XctManager* xct_manager = context->get_engine()->get_xct_manager();
   xct::XctId prev_xct_id;
   for (int i = 0; i < kXctsPerThread; ++i) {
     uint64_t account_id;
     if (contended) {
       account_id = rand.next_uint32() % (kBranches * kAccounts);
     } else {
       const uint64_t accounts_per_thread = (kBranches * kAccounts / kMaxTestThreads);
       account_id = rand.next_uint32() % accounts_per_thread
         + (client_id_ * accounts_per_thread);
     }
     uint64_t teller_id = account_id / kAccountsPerTellers;
     uint64_t branch_id = account_id / kAccounts;
     uint64_t history_id = client_id_ * kXctsPerThread + i;
     int64_t  amount = rand.uniform_within(kAmountRangeFrom, kAmountRangeTo);
     EXPECT_GE(amount, kAmountRangeFrom);
     EXPECT_LE(amount, kAmountRangeTo);
     while (true) {
       ErrorStack error_stack = try_transaction(context, &highest_commit_epoch,
         branch_id, teller_id, account_id, history_id, amount);
       if (!error_stack.is_error()) {
         xct::XctId xct_id = context->get_current_xct().get_id();
         if (prev_xct_id.get_epoch() == xct_id.get_epoch()) {
           EXPECT_LT(prev_xct_id.get_ordinal(), xct_id.get_ordinal());
         }
         prev_xct_id = context->get_current_xct().get_id();
         break;
       } else if (error_stack.get_error_code() == kErrorCodeXctRaceAbort) {
         // abort and retry
         if (context->get_current_xct().is_active()) {
           CHECK_ERROR(xct_manager->abort_xct(context));
         }
       } else {
         COERCE_ERROR(error_stack);
       }
     }
   }
   CHECK_ERROR(xct_manager->wait_for_commit(highest_commit_epoch));
   return foedus::kRetOk;
 }
TEST(MonotoneCubicInterpolationTest, fitAkimaDataSet)
{
  std::vector<double> x(11);
  std::vector<double> y(11);

  x[0] = 0;
  y[0] = 10;
  x[1] = 2;
  y[1] = 10;
  x[2] = 3;
  y[2] = 10;
  x[3] = 5;
  y[3] = 10;
  x[4] = 6;
  y[4] = 10;
  x[5] = 8;
  y[5] = 10;
  x[6] = 9;
  y[6] = 10.5;
  x[7] = 11;
  y[7] = 15;
  x[8] = 12;
  y[8] = 50;
  x[9] = 14;
  y[9] = 60;
  x[10] = 15;
  y[10] = 85;

  MonotoneCubicInterpolation interp(x, y);

  EXPECT_NEAR(interp.sample(0.), 10., tol);
  EXPECT_NEAR(interp.sample(2.), 10., tol);
  EXPECT_NEAR(interp.sample(3.), 10., tol);
  EXPECT_NEAR(interp.sample(5.), 10., tol);
  EXPECT_NEAR(interp.sample(6.), 10., tol);
  EXPECT_NEAR(interp.sample(8.), 10., tol);
  EXPECT_NEAR(interp.sample(9.), 10.5, tol);
  EXPECT_NEAR(interp.sample(11.), 15., tol);
  EXPECT_NEAR(interp.sample(12.), 50., tol);
  EXPECT_NEAR(interp.sample(14.), 60., tol);
  EXPECT_NEAR(interp.sample(15.), 85., tol);

  for (double z = 0; z <= 15.; z += .1)
    EXPECT_GE(interp.sampleDerivative(z), -tol);
}
TYPED_TEST(InnerProductLayerTest, TestCPU) {
  LayerParameter layer_param;
  Caffe::set_mode(Caffe::CPU);
  layer_param.set_num_output(10);
  layer_param.mutable_weight_filler()->set_type("uniform");
  layer_param.mutable_bias_filler()->set_type("uniform");
  layer_param.mutable_bias_filler()->set_min(1);
  layer_param.mutable_bias_filler()->set_max(2);
  shared_ptr<InnerProductLayer<TypeParam> > layer(
      new InnerProductLayer<TypeParam>(layer_param));
  layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
  const TypeParam* data = this->blob_top_->cpu_data();
  const int count = this->blob_top_->count();
  for (int i = 0; i < count; ++i) {
    EXPECT_GE(data[i], 1.);
  }
}
/// \brief Computes the nth Fibonacci number iteratively, with linear runtime
/// and constant memory usage.
int fibonacciConst(int n)
{
    #ifdef TEST_RUN
    EXPECT_GE(n, 0) << "A negative value " << n << " was passed to fibonacciConst";
    #endif
    if (n<2)
        return n;
    int fib1=0;
    int fib2=1;
    int ret=0;
    for (int i=2;i<n+1;i++)
    {
        ret=fib1+fib2;
        fib1=fib2;
        fib2=ret;
    }
    return ret;
}
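// Illustrative usage sketch (not part of the original source): a gtest case
// exercising fibonacciConst() against known values. It assumes gtest is
// available and that the function above is visible in this translation unit.
TEST(FibonacciConstTest, KnownValues)
{
    EXPECT_EQ(0, fibonacciConst(0));
    EXPECT_EQ(1, fibonacciConst(1));
    EXPECT_EQ(1, fibonacciConst(2));
    EXPECT_EQ(55, fibonacciConst(10));
    // The sequence never decreases, so each value is >= its predecessor.
    for (int n = 1; n <= 20; ++n)
        EXPECT_GE(fibonacciConst(n), fibonacciConst(n - 1));
}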
Example #8
TEST(ComediSysfs, CanReadMaxReadBuffer ) {
    lsampl_t data;
    int ret;
    int subdev = 0;        
    int chan = 0;          
    int range = 0;         
    int aref = AREF_GROUND;
    FILE *fp;
    char buf[100];
    int bufsize;

    std::string sysfs_entry("/sys/class/comedi/comedi0/max_read_buffer_kb");
    fp = fopen(sysfs_entry.c_str(),"r");
    ASSERT_TRUE(fp) << "Unable to open file " << sysfs_entry << "\n";
    ret = fscanf(fp, "%d", &bufsize);
    fclose(fp);
    ASSERT_EQ(1, ret) << "Could not parse an integer from " << sysfs_entry;
    EXPECT_GE(bufsize, 0);
    comedi_close(dev);
}
TEST_F(ResourceOffersTest, ResourceOfferWithMultipleSlaves)
{
  Try<Owned<cluster::Master>> master = StartMaster();
  ASSERT_SOME(master);

  Owned<MasterDetector> detector = master.get()->createDetector();
  vector<Owned<cluster::Slave>> slaves;

  // Start 10 slaves.
  for (int i = 0; i < 10; i++) {
    slave::Flags flags = CreateSlaveFlags();

    flags.resources = Option<std::string>("cpus:2;mem:1024");

    Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
    ASSERT_SOME(slave);
    slaves.push_back(slave.get());
  }

  MockScheduler sched;
  MesosSchedulerDriver driver(
      &sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);

  EXPECT_CALL(sched, registered(&driver, _, _))
    .Times(1);

  Future<vector<Offer>> offers;
  EXPECT_CALL(sched, resourceOffers(&driver, _))
    .WillOnce(FutureArg<1>(&offers))
    .WillRepeatedly(Return()); // All 10 slaves might not be in the first offer.

  driver.start();

  AWAIT_READY(offers);
  EXPECT_NE(0u, offers.get().size());
  EXPECT_GE(10u, offers.get().size());

  Resources resources(offers.get()[0].resources());
  EXPECT_EQ(2, resources.get<Value::Scalar>("cpus").get().value());
  EXPECT_EQ(1024, resources.get<Value::Scalar>("mem").get().value());

  driver.stop();
  driver.join();
}
TEST_F(RandomInterfaceTest, rand)
{
    // Test range and distribution of xme_hal_random_rand()

    const double safetyFactor = 2.0;
    double sum = 0;

    uint16_t minExpected = ((uint16_t)(numSamples / ((double)XME_HAL_RANDOM_RAND_MAX+1) / safetyFactor));
    uint16_t maxExpected = ((uint16_t)(numSamples / ((double)XME_HAL_RANDOM_RAND_MAX+1) * safetyFactor));

    for (uint64_t i = 0; i <= (double)numRounds*65536U; i++)
    {
        uint16_t r = xme_hal_random_rand();

        // Prevent overflow
        if (distribution[r] < 0xFFFF)
        {
            distribution[r]++;
        }

        sum += r;
    }

    printf("xme_hal_random_rand() test:\n");
    printf("- Minimum expected value per bin: %d (should be >> 0 for the test to be effective)\n", minExpected);
    printf("- Maximum expected value per bin: %d (must be <= 65535 for the test to be effective)\n", maxExpected);

    for (uint64_t i = 0; i <= XME_HAL_RANDOM_RAND_MAX; i++)
    {
        // In a truly uniform distribution, each item would be incremented
        // numSamples/(XME_HAL_RANDOM_RAND_MAX+1) times. These are safety bounds
        // that should always be true, given a somehow uniform distribution.
        EXPECT_GE(distribution[i], minExpected);
        EXPECT_LE(distribution[i], maxExpected);
    }

    // Check whether the mean over all random numbers, normalized to [0, 1],
    // is close to the midpoint of the interval
    {
        double mean = sum / (XME_HAL_RANDOM_RAND_MAX+1) / numSamples;
        EXPECT_LT(0.499, mean);
        EXPECT_LT(mean, 0.501);
    }
}
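// Worked example of the bounds above (illustrative assumption, not from the
// original source): with XME_HAL_RANDOM_RAND_MAX = 65535, safetyFactor = 2.0
// and numSamples = 16 * 65536, a perfectly uniform generator puts
// numSamples / 65536 = 16 samples into each bin, so the accepted range per
// bin is [minExpected, maxExpected] = [8, 32]. Larger sample counts make the
// bounds more meaningful, as long as maxExpected stays below the 0xFFFF
// saturation cap applied while counting.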
TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) {
  Caffe::set_mode(Caffe::GPU);
  Caffe::set_phase(Caffe::TRAIN);
  LayerParameter layer_param;
  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
  pooling_param->set_kernel_size(3);
  pooling_param->set_stride(2);
  pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
  PoolingLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));

  // Check if the output is correct - it should do random sampling
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  TypeParam total = 0;
  for (int n = 0; n < this->blob_top_->num(); ++n) {
    for (int c = 0; c < this->blob_top_->channels(); ++c) {
      for (int ph = 0; ph < this->blob_top_->height(); ++ph) {
        for (int pw = 0; pw < this->blob_top_->width(); ++pw) {
          TypeParam pooled = top_data[this->blob_top_->offset(n, c, ph, pw)];
          total += pooled;
          int hstart = ph * 2;
          int hend = min(hstart + 3, this->blob_bottom_->height());
          int wstart = pw * 2;
          int wend = min(wstart + 3, this->blob_bottom_->width());
          bool has_equal = false;
          for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
              has_equal |= (pooled == bottom_data[this->blob_bottom_->
                  offset(n, c, h, w)]);
            }
          }
          EXPECT_TRUE(has_equal);
        }
      }
    }
  }
  // When we are doing stochastic pooling, the average we get should be higher
  // than the simple data average, since higher-valued entries are sampled
  // (and hence counted) with higher probability.
  EXPECT_GE(total / this->blob_top_->count(), 0.55);
}
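// Why the 0.55 threshold is plausible (added reasoning, not from the original
// source): stochastic pooling draws each element of a window with probability
// proportional to its value, so the expected pooled output for window values
// x_1..x_k is sum(x_i^2) / sum(x_i), which is >= the plain average
// sum(x_i) / k by the Cauchy-Schwarz inequality. Assuming the fixture fills
// the bottom blob with positive values spread roughly uniformly below 1, the
// plain average sits near 0.5 and the stochastically pooled mean should land
// noticeably above it.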
void doResize(T& target, std::vector<bool>& valid, std::size_t newSize) {
  auto oldSize = target.size();
  auto before = validData(target, valid);
  target.resize(newSize);
  valid.resize(newSize);
  for (auto i = oldSize; i < newSize; ++i) {
    valid[i] = true;
  }
  auto after = validData(target, valid);
  if (oldSize == newSize) {
    EXPECT_EQ(before, after);
  } else if (oldSize < newSize) {
    EXPECT_LT(before.size(), after.size());
    EXPECT_TRUE(std::equal(before.begin(), before.end(), after.begin()));
  } else {
    EXPECT_GE(before.size(), after.size());
    EXPECT_TRUE(std::equal(after.begin(), after.end(), before.begin()));
  }
}
TEST(ArrayBufferBuilderTest, Append)
{
    const char data[] = "HelloWorld";
    size_t dataSize = sizeof(data) - 1;

    ArrayBufferBuilder builder(2 * dataSize);

    EXPECT_EQ(dataSize, builder.append(data, dataSize));
    EXPECT_EQ(dataSize, builder.byteLength());
    EXPECT_EQ(dataSize * 2, builder.capacity());

    EXPECT_EQ(dataSize, builder.append(data, dataSize));
    EXPECT_EQ(dataSize * 2, builder.byteLength());
    EXPECT_EQ(dataSize * 2, builder.capacity());

    EXPECT_EQ(dataSize, builder.append(data, dataSize));
    EXPECT_EQ(dataSize * 3, builder.byteLength());
    EXPECT_GE(builder.capacity(), dataSize * 3);
}
// Test queue without concurrency.
TEST(ThreadSafePriorityQueue, Serial)
{
   const U32 min = 0;
   const U32 max = 9;
   const U32 len = 11;

   U32 indices[len]    = {  2,   7,   4,   6,   1,   5,   3,   8,   6,   9, 0};
   F32 priorities[len] = {0.2, 0.7, 0.4, 0.6, 0.1, 0.5, 0.3, 0.8, 0.6, 0.9, 0};
   
   ThreadSafePriorityQueue<U32, F32, true>  minQueue;
   ThreadSafePriorityQueue<U32, F32, false> maxQueue;

   for(U32 i = 0; i < len; i++)
   {
      minQueue.insert(priorities[i], indices[i]);
      maxQueue.insert(priorities[i], indices[i]);
   }

   EXPECT_FALSE(minQueue.isEmpty());
   EXPECT_FALSE(maxQueue.isEmpty());
   
   U32 index = min;
   for(U32 i = 0; i < len; i++)
   {
      U32 popped;
      EXPECT_TRUE(minQueue.takeNext(popped))
         << "Failed to pop element from minQueue";
      EXPECT_LE(index, popped)
         << "Element from minQueue was not in sort order";
      index = popped;
   }
   
   index = max;
   for(U32 i = 0; i < len; i++)
   {
      U32 popped;
      EXPECT_TRUE(maxQueue.takeNext(popped))
         << "Failed to pop element from maxQueue";
      EXPECT_GE(index, popped)
         << "Element from maxQueue was not in sort order";
      index = popped;
   }
}
Example #15
unsigned
hash(int h, bfpid_t const &pid)
{
    if(debug) cout << "h=" << h << " pid=" << pid << endl;
    EXPECT_GE(h, 0);
    EXPECT_LT(h, (int) HASH_COUNT); // this cast is required to avoid "an unnamed type..." error in suncc
    unsigned x = pid.vol();
    if(debug) cout << " x= " << x << endl;
    x ^= pid.page; // XOR doesn't do much, since
                   // most of the time the volume is the same for all pages
    if(debug) cout << " x= " << x << endl;
    EXPECT_LT(h, (int) HASH_COUNT);

    unsigned retval = w_hashing::uhash::hash64(_hash_seeds[h], x);

    if(debug) cout << " retval= " << retval << endl;
    retval %= unsigned(_size);
    if(debug) cout << " retval= " << retval << endl;
    return retval;
}
Example #16
TEST(EventManager, ResetTimer) {
    bool callbackFired = false;
    fp::InlineQueue queue;

    fp::TimerEvent *timer = queue.registerEvent(std::chrono::seconds(1), [&](const fp::TimerEvent *event) {
        callbackFired = true;
    });

    const auto startTime = std::chrono::steady_clock::now();
    queue.dispatch(std::chrono::milliseconds(500)); //0.5 seconds
    EXPECT_FALSE(callbackFired);
    timer->reset();
    queue.dispatch(std::chrono::seconds(2));
    const auto endTime = std::chrono::steady_clock::now();

    const auto actual = std::chrono::duration_cast<std::chrono::milliseconds>(endTime - startTime);
    EXPECT_TRUE(callbackFired);
    EXPECT_LT(actual.count(), 1600);
    EXPECT_GE(actual.count(), 1500);
}
void randomgentest(int limit)
{
    csString scriptstr;
    if (limit < 0)
    {
       scriptstr = "Roll = rnd();";
       limit = 1;
    }
    else
    {
       scriptstr.Format("Roll = rnd(%d);", limit);
    }
    MathScript *script = MathScript::Create("randomgen test", scriptstr.GetData());
    MathEnvironment env;
    ASSERT_NE(script, NULL) << scriptstr.GetData() << " did not create script";
    bool above1=false, abovehalflimit=false;
    for (int i=0; i<100; i++) //try 100 times, since this is random
    {
       script->Evaluate(&env);
       MathVar *roll = env.Lookup("Roll");
       EXPECT_GE(roll->GetValue(), 0); 
       EXPECT_LE(roll->GetValue(), limit);
       if (roll->GetValue()>1)
       {
          above1 = true;
       }
       if (2*roll->GetValue()>limit)
       {
          abovehalflimit = true;
       }
    }
    if (limit>1)
    {
       EXPECT_TRUE(above1) << scriptstr.GetData() << " never exceeds 1";
    }
    if (limit>0)
    {
       EXPECT_TRUE(abovehalflimit) << scriptstr.GetData() << " never exceeds half of limit";
    }
    delete script;
}
Example #18
TEST(umbrella, iovecSerialize) {
  size_t nIovs = 3;
  char value[] = "value";
  iovec iovs[] = {{&value[0], 1}, {&value[1], 2}, {&value[3], 2}};

  um_backing_msg_t bmsg;
  um_backing_msg_init(&bmsg);

  uint64_t reqId = 1;
  mc_msg_t msg;
  mc_msg_init_not_refcounted(&msg);
  msg.op = mc_op_get;
  msg.result = mc_res_found;
  msg.flags = 0x4;

  // Verify emitIov
  std::string serializedVal = "";
  EXPECT_EQ(
      um_emit_iovs_extended(
          &bmsg, reqId, &msg, iovs, nIovs, emitIov, (void *)&serializedVal),
      0);

  verifySerializedValue(serializedVal, reqId, &msg, value);


  // Verify writeIov
  um_backing_msg_cleanup(&bmsg);
  um_backing_msg_init(&bmsg);
  serializedVal = "";
  reqId++;

  iovec output[20];
  ssize_t nOutIovs = um_write_iovs_extended(&bmsg, reqId, &msg,
                                            iovs, nIovs, output, 20);
  EXPECT_GE(nOutIovs, 1);
  for (int i = 0; i < nOutIovs; i++) {
    serializedVal.append((const char *)output[i].iov_base, output[i].iov_len);
  }

  verifySerializedValue(serializedVal, reqId, &msg, value);
}
Example #19
// Verify the sorting, [EGL 1.5] section 3.4.1.2 pg 30:
// [configs are sorted] by larger total number of color bits (for an RGB
// color buffer this is the sum of EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_BLUE_SIZE,
// and EGL_ALPHA_SIZE; for a luminance color buffer, the sum of EGL_LUMINANCE_SIZE
// and EGL_ALPHA_SIZE). If the requested number of bits in attrib_list for a
// particular color component is 0 or EGL_DONT_CARE, then the number of bits
// for that component is not considered.
TEST(ConfigSetTest, Sorting_BitSizes)
{
    egl::ConfigSet set;
    size_t testConfigCount = 64;
    for (size_t i = 0; i < testConfigCount; i++)
    {
        egl::Config config = GenerateGenericConfig();

        // Give random-ish bit sizes to the config
        config.redSize =   (i *  2) %  3;
        config.greenSize = (i +  5) %  7;
        config.blueSize =  (i +  7) % 11;
        config.alphaSize = (i + 13) % 17;

        set.add(config);
    }

    egl::AttributeMap greaterThan1BitFilter;
    greaterThan1BitFilter.insert(EGL_RED_SIZE, 1);
    greaterThan1BitFilter.insert(EGL_GREEN_SIZE, 1);
    greaterThan1BitFilter.insert(EGL_BLUE_SIZE, 1);
    greaterThan1BitFilter.insert(EGL_ALPHA_SIZE, 1);

    std::vector<const egl::Config *> filteredConfigs = set.filter(greaterThan1BitFilter);
    for (size_t i = 1; i < filteredConfigs.size(); i++)
    {
        const egl::Config &prevConfig = *filteredConfigs[i - 1];
        size_t prevBitCount = prevConfig.redSize +
                              prevConfig.greenSize +
                              prevConfig.blueSize +
                              prevConfig.alphaSize;

        const egl::Config &curConfig = *filteredConfigs[i];
        size_t curBitCount = curConfig.redSize +
                             curConfig.greenSize +
                             curConfig.blueSize +
                             curConfig.alphaSize;

        EXPECT_GE(prevBitCount, curBitCount);
    }
}
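// Minimal sketch of the ordering rule quoted above (illustrative only; the
// actual comparator is internal to egl::ConfigSet, and "SortsBefore" is a
// hypothetical name): configs with a larger total of color bits sort first.
static bool SortsBefore(const egl::Config &a, const egl::Config &b)
{
    size_t aBits = a.redSize + a.greenSize + a.blueSize + a.alphaSize;
    size_t bBits = b.redSize + b.greenSize + b.blueSize + b.alphaSize;
    return aBits > bBits;  // e.g. an 8/8/8/8 config (32 bits) precedes 5/6/5/0 (16 bits)
}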
TYPED_TEST(InnerProductLayerTest, TestForwardNoBatch) {
  typedef typename TypeParam::Dtype Dtype;
  this->blob_bottom_vec_.push_back(this->blob_bottom_nobatch_);
  LayerParameter layer_param;
  InnerProductParameter* inner_product_param =
      layer_param.mutable_inner_product_param();
  inner_product_param->set_num_output(10);
  inner_product_param->mutable_weight_filler()->set_type("uniform");
  inner_product_param->mutable_bias_filler()->set_type("uniform");
  inner_product_param->mutable_bias_filler()->set_min(1);
  inner_product_param->mutable_bias_filler()->set_max(2);
  shared_ptr<InnerProductLayer<Dtype> > layer(
      new InnerProductLayer<Dtype>(layer_param));
  layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  const Dtype* data = this->blob_top_->cpu_data();
  const int_tp count = this->blob_top_->count();
  for (int_tp i = 0; i < count; ++i) {
    EXPECT_GE(data[i], 1.);
  }
}
Example #21
TEST(RandomGenerateTest, CheckULLParams) {
	std::vector<unsigned long long> arr;

	std::ifstream in("generate_tests.txt");
	int test_num;
	in >> test_num;

	SeqParams<unsigned long long> params, res_params;

	for (int i = 0; i < test_num; ++i) {
		in >> params;
		
		generate(arr, params.len, params.min_val, params.max_val, params.repetitions_percent);
		std::cerr << params.len << " " << params.min_val << " " << params.max_val << " " << params.repetitions_percent << std::endl;
		res_params = calcRepetitionsPercent(arr);

		EXPECT_LE(params.min_val, res_params.min_val);
		EXPECT_GE(params.max_val, res_params.max_val);
		EXPECT_LE(abs(params.repetitions_percent - res_params.repetitions_percent), 10);
	}
}
Example #22
TYPED_TEST(ThresholdLayerTest, Test) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  ThresholdLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Now, check values
  const Dtype* bottom_data = this->blob_bottom_->cpu_data();
  const Dtype* top_data = this->blob_top_->cpu_data();
  const Dtype threshold_ = layer_param.threshold_param().threshold();
  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
    EXPECT_GE(top_data[i], 0.);
    EXPECT_LE(top_data[i], 1.);
    if (top_data[i] == 0) {
      EXPECT_LE(bottom_data[i], threshold_);
    }
    if (top_data[i] == 1) {
      EXPECT_GT(bottom_data[i], threshold_);
    }
  }
}
VOID TEST(SampleTest, FastSampleMacrosTest) 
{
    EXPECT_TRUE(1);
    EXPECT_FALSE(0);
    
    EXPECT_EQ(1, 1); // ==
    EXPECT_NE(1, 2); // !=
    EXPECT_LE(1, 2); // <=
    EXPECT_LT(1, 2); // <
    EXPECT_GE(2, 1); // >=
    EXPECT_GT(2, 1); // >

    EXPECT_STREQ("winlin", "winlin");
    EXPECT_STRNE("winlin", "srs");
    EXPECT_STRCASEEQ("winlin", "Winlin");
    EXPECT_STRCASENE("winlin", "srs");
    
    EXPECT_FLOAT_EQ(1.0, 1.000000000000001);
    EXPECT_DOUBLE_EQ(1.0, 1.0000000000000001);
    EXPECT_NEAR(10, 15, 5);
}
TEST_F(EventsDatabaseTests, test_optimize) {
  auto sub = std::make_shared<DBFakeEventSubscriber>();
  for (size_t i = 800; i < 800 + 10; ++i) {
    sub->testAdd(i);
  }

  // Lie about the tool type to enable optimizations.
  auto default_type = kToolType;
  kToolType = ToolType::DAEMON;
  FLAGS_events_optimize = true;

  // Must also define an executing query.
  setDatabaseValue(kPersistentSettings, kExecutingQuery, "events_db_test");

  auto t = getUnixTime();
  auto results = genRows(sub.get());
  EXPECT_EQ(10U, results.size());
  // Optimization will set the time NOW as the minimum event time.
  // Thus it is not possible to set an event in the past.
  EXPECT_GE(sub->optimize_time_ + 100, t);
  EXPECT_LE(sub->optimize_time_ - 100, t);
  // The last EID returned will also be stored for duplication checks.
  EXPECT_EQ(10U, sub->optimize_eid_);

  for (size_t i = t + 800; i < t + 800 + 10; ++i) {
    sub->testAdd(i);
  }

  results = genRows(sub.get());
  EXPECT_EQ(10U, results.size());

  // The optimize time should have been written to the database.
  // It should be the same as the current (relative) optimize time.
  std::string content;
  getDatabaseValue("events", "optimize.events_db_test", content);
  EXPECT_EQ(std::to_string(sub->optimize_time_), content);

  // Restore the tool type.
  kToolType = default_type;
}
Example #25
TEST(HeapTest, SimpleHeapTest)
{
    Heap<int> heap;

    EXPECT_TRUE(heap.isEmpty());

    heap.add(1);
    heap.add(2);
    heap.add(3);

    EXPECT_EQ(heap.getNumNodes(), 3);
    EXPECT_EQ(heap.getHeight(), 2);

    for (int i = 4; i <= 15; i++)
    {
        heap.add(i);
    }

    ASSERT_EQ(heap.getNumNodes(), 15);
    EXPECT_EQ(heap.getHeight(), 4);

    int* testArray = new int[15];
    for (int i = 0; i < 15; i++)
    {
        testArray[i] = heap.peekTop();
        heap.remove();
    }

    for (int i = 0; i < 14; i++)
    {
        EXPECT_GE(testArray[i], testArray[i + 1]);
    }

    heap.clear();

    EXPECT_TRUE(heap.isEmpty());

    delete[] testArray;
}
TYPED_TEST(MaxPoolingDropoutTest, TestBackward) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  layer_param.set_phase(TRAIN);
  PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
  pooling_param->clear_kernel_size();
  pooling_param->add_kernel_size(3);
  pooling_param->clear_stride();
  pooling_param->add_stride(2);
  PoolingLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  for (int i = 0; i < this->blob_top_->count(); ++i) {
    this->blob_top_->mutable_cpu_diff()[i] = 1.;
  }
  vector<bool> propagate_down(this->blob_bottom_vec_.size(), true);
  layer.Backward(this->blob_top_vec_, propagate_down,
                 this->blob_bottom_vec_);
  const Dtype* bottom_diff = this->blob_bottom_->cpu_diff();
  Dtype sum = 0.;
  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
    sum += bottom_diff[i];
  }
  EXPECT_EQ(sum, this->blob_top_->count());
  // Dropout in-place
  DropoutLayer<Dtype> dropout_layer(layer_param);
  dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_);
  dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_);
  dropout_layer.Backward(this->blob_top_vec_, propagate_down,
                         this->blob_top_vec_);
  layer.Backward(this->blob_top_vec_, propagate_down,
                 this->blob_bottom_vec_);
  Dtype sum_with_dropout = 0.;
  bottom_diff = this->blob_bottom_->cpu_diff();
  for (int i = 0; i < this->blob_bottom_->count(); ++i) {
    sum_with_dropout += bottom_diff[i];
  }
  EXPECT_GE(sum_with_dropout, sum);
}
TYPED_TEST(ArgMaxLayerTest, TestCPU) {
  LayerParameter layer_param;
  ArgMaxLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Now, check values
  const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
  const TypeParam* top_data = this->blob_top_->cpu_data();
  int max_ind;
  TypeParam max_val;
  int num = this->blob_bottom_->num();
  int dim = this->blob_bottom_->count() / num;
  for (int i = 0; i < num; ++i) {
    EXPECT_GE(top_data[i], 0);
    EXPECT_LE(top_data[i], dim);
    max_ind = top_data[i];
    max_val = bottom_data[i * dim + max_ind];
    for (int j = 0; j < dim; ++j) {
      EXPECT_LE(bottom_data[i * dim + j], max_val);
    }
  }
}
TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) {
  Caffe::set_mode(Caffe::GPU);
  LayerParameter layer_param;
  CuDNNTanHLayer<TypeParam> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Test exact values
  for (int i = 0; i < this->blob_bottom_->num(); ++i) {
    for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
      for (int k = 0; k < this->blob_bottom_->height(); ++k) {
        for (int l = 0; l < this->blob_bottom_->width(); ++l) {
          EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
          EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
        }
      }
    }
  }
}
TYPED_TEST(NeuronLayerTest, TestTanH) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  TanHLayer<Dtype> layer(layer_param);
  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
  // Test exact values
  for (int i = 0; i < this->blob_bottom_->num(); ++i) {
    for (int j = 0; j < this->blob_bottom_->channels(); ++j) {
      for (int k = 0; k < this->blob_bottom_->height(); ++k) {
        for (int l = 0; l < this->blob_bottom_->width(); ++l) {
          EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4,
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
          EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4,
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) /
             (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1));
        }
      }
    }
  }
}
Example #30
TEST(MapTests, TestOpenMap) {
    char** options = nullptr;
    options = ngsListAddNameValue(options, "DEBUG_MODE", "ON");
    options = ngsListAddNameValue(options, "SETTINGS_DIR",
                              ngsFormFileName(ngsGetCurrentDirectory(), "tmp",
                                              nullptr));
    EXPECT_EQ(ngsInit(options), COD_SUCCESS);

    ngsListFree(options);

    ngs::MapStore mapStore;
    ngs::CatalogPtr catalog = ngs::Catalog::instance();
    CPLString tmpDir = CPLFormFilename(CPLGetCurrentDir(), "tmp", nullptr);
    ngs::ObjectPtr tmpDirObj = catalog->getObjectBySystemPath(tmpDir);
    ASSERT_NE(tmpDirObj, nullptr);
    ngs::ObjectContainer* tmpDirContainer =
            ngsDynamicCast(ngs::ObjectContainer, tmpDirObj);

    tmpDirContainer->loadChildren();
    ASSERT_NE(tmpDirContainer->hasChildren(), 0);

    ngs::ObjectPtr mapFileObj = tmpDirContainer->getChild("default.ngmd");
    ngs::MapFile* mapFile = ngsDynamicCast(ngs::MapFile, mapFileObj);

    char mapId = mapStore.openMap(mapFile);
    EXPECT_GE(mapId, 0);
    ngs::MapViewPtr defMap = mapStore.getMap(mapId);
    ASSERT_NE(defMap, nullptr);
    EXPECT_EQ(defMap->hasIconSet("simple"), true);
    ngs::Catalog::setInstance(nullptr);

    auto iconData = defMap->iconSet("simple");
    ASSERT_NE(iconData.buffer, nullptr);
    EXPECT_EQ(iconData.buffer[0], 0);
    EXPECT_EQ(iconData.buffer[1], 0);
    EXPECT_EQ(iconData.buffer[2], 0);
    EXPECT_EQ(iconData.buffer[3], 0);
    ngsUnInit();
}