Example No. 1
TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
  typedef typename TypeParam::Dtype Dtype;
  const int kernel_h = 11;
  const int kernel_w = 13;
  vector<int> bottom_shape(4);
  bottom_shape[0] = 15;
  bottom_shape[1] = 18;
  bottom_shape[2] = kernel_h * 2;
  bottom_shape[3] = kernel_w * 2;
  FillerParameter filler_param;
  GaussianFiller<Dtype> filler(filler_param);
  for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) {
    this->blob_bottom_vec_[i]->Reshape(bottom_shape);
    filler.Fill(this->blob_bottom_vec_[i]);
  }
  LayerParameter layer_param;
  ConvolutionParameter* convolution_param =
      layer_param.mutable_convolution_param();
  convolution_param->set_num_output(12);
  convolution_param->set_bias_term(false);
  convolution_param->set_group(6);
  convolution_param->set_kernel_h(kernel_h);
  convolution_param->set_kernel_w(kernel_w);
  convolution_param->mutable_weight_filler()->set_type("gaussian");
  Blob<Dtype> weights;
  Blob<Dtype> top_diff;
  // Shape and fill weights and top_diff.
  bool copy_diff;
  bool reshape;
  {
    ConvolutionLayer<Dtype> layer(layer_param);
    layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    top_diff.ReshapeLike(*this->blob_top_);
    filler.Fill(&top_diff);
    ASSERT_EQ(1, layer.blobs().size());
    copy_diff = false; reshape = true;
    weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape);
  }
  vector<bool> propagate_down(1, true);
  Blob<Dtype> result_2d;
  Blob<Dtype> backward_result_2d;
  Blob<Dtype> backward_weight_result_2d;
  // Test with 2D im2col
  {
    caffe_set(this->blob_top_->count(), Dtype(0),
              this->blob_top_->mutable_cpu_data());
    caffe_set(this->blob_bottom_->count(), Dtype(0),
              this->blob_bottom_->mutable_cpu_diff());
    caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff());
    // Do SetUp and Forward; save Forward result in result_2d.
    convolution_param->set_force_nd_im2col(false);
    ConvolutionLayer<Dtype> layer_2d(layer_param);
    layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    ASSERT_EQ(1, layer_2d.blobs().size());
    copy_diff = false; reshape = false;
    layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape);
    layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    copy_diff = false; reshape = true;
    result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape);
    // Copy pre-generated top diff into actual top diff;
    // do Backward and save result in backward_result_2d.
    ASSERT_EQ(this->blob_top_->shape(), top_diff.shape());
    caffe_copy(top_diff.count(), top_diff.cpu_data(),
               this->blob_top_->mutable_cpu_diff());
    layer_2d.Backward(this->blob_top_vec_, propagate_down,
                      this->blob_bottom_vec_);
    copy_diff = true; reshape = true;
    backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape);
    backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape);
  }
  Blob<Dtype> result_nd;
  Blob<Dtype> backward_result_nd;
  Blob<Dtype> backward_weight_result_nd;
  // Test with ND im2col
  {
    caffe_set(this->blob_top_->count(), Dtype(0),
              this->blob_top_->mutable_cpu_data());
    caffe_set(this->blob_bottom_->count(), Dtype(0),
              this->blob_bottom_->mutable_cpu_diff());
    caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff());
    // Do SetUp and Forward; save Forward result in result_nd.
    convolution_param->set_force_nd_im2col(true);
    ConvolutionLayer<Dtype> layer_nd(layer_param);
    layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
    ASSERT_EQ(1, layer_nd.blobs().size());
    copy_diff = false; reshape = false;
    layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape);
    layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
    copy_diff = false; reshape = true;
    result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape);
    // Copy pre-generated top diff into actual top diff;
    // do Backward and save result in backward_result_nd.
    ASSERT_EQ(this->blob_top_->shape(), top_diff.shape());
    caffe_copy(top_diff.count(), top_diff.cpu_data(),
               this->blob_top_->mutable_cpu_diff());
    layer_nd.Backward(this->blob_top_vec_, propagate_down,
                      this->blob_bottom_vec_);
    copy_diff = true; reshape = true;
    backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape);
    backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape);
  }
  ASSERT_EQ(result_nd.count(), result_2d.count());
  for (int i = 0; i < result_2d.count(); ++i)  {
    EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]);
  }
  ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count());
  for (int i = 0; i < backward_result_2d.count(); ++i) {
    EXPECT_EQ(backward_result_2d.cpu_diff()[i],
              backward_result_nd.cpu_diff()[i]);
  }
  ASSERT_EQ(backward_weight_result_nd.count(),
            backward_weight_result_2d.count());
  for (int i = 0; i < backward_weight_result_2d.count(); ++i) {
    EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i],
              backward_weight_result_nd.cpu_diff()[i]);
  }
}
Example No. 2
TEST_F(TestProducer, ContentKeyRequest)
{
  Name prefix("/prefix");
  Name suffix("/a/b/c");
  Name expectedInterest(prefix);
  expectedInterest.append(Encryptor::getNAME_COMPONENT_READ());
  expectedInterest.append(suffix);
  expectedInterest.append(Encryptor::getNAME_COMPONENT_E_KEY());

  Name cKeyName(prefix);
  cKeyName.append(Encryptor::getNAME_COMPONENT_SAMPLE());
  cKeyName.append(suffix);
  cKeyName.append(Encryptor::getNAME_COMPONENT_C_KEY());

  Name timeMarker("20150101T100000/20150101T120000");
  MillisecondsSince1970 testTime1 = fromIsoString("20150101T100001");
  MillisecondsSince1970 testTime2 = fromIsoString("20150101T110001");
  Name::Component testTimeRounded1("20150101T100000");
  Name::Component testTimeRounded2("20150101T110000");
  Name::Component testTimeComponent2("20150101T110001");

  // Create content keys required for this test case:
  for (size_t i = 0; i < suffix.size(); ++i) {
    createEncryptionKey(expectedInterest, timeMarker);
    expectedInterest = expectedInterest.getPrefix(-2).append
      (Encryptor::getNAME_COMPONENT_E_KEY());
  }

  int expressInterestCallCount = 0;

  // Prepare a TestFace to instantly answer calls to expressInterest.
  class TestFace : public Face {
  public:
    TestFace(TestProducer* parent, const Name& timeMarker,
             int* expressInterestCallCount)
    : Face("localhost"),
      parent_(parent),
      timeMarker_(timeMarker),
      expressInterestCallCount_(expressInterestCallCount)
    {}

    virtual uint64_t
    expressInterest
      (const Interest& interest, const OnData& onData,
       const OnTimeout& onTimeout, const OnNetworkNack& onNetworkNack,
       WireFormat& wireFormat = *WireFormat::getDefaultWireFormat())
    {
      ++(*expressInterestCallCount_);

      Name interestName(interest.getName());
      interestName.append(timeMarker_);
      if (parent_->encryptionKeys.find(interestName) == parent_->encryptionKeys.end())
        throw runtime_error
          ("TestFace::expressInterest: Can't find " + interestName.toUri());
      onData(ptr_lib::make_shared<Interest>(interest),
             parent_->encryptionKeys[interestName]);

      return 0;
    }

  private:
    TestProducer* parent_;
    Name timeMarker_;
    int *expressInterestCallCount_;
  };

  TestFace face(this, timeMarker, &expressInterestCallCount);

  // Verify that the content key is correctly encrypted for each domain, and
  // the produce method encrypts the provided data with the same content key.
  ptr_lib::shared_ptr<ProducerDb> testDb(new Sqlite3ProducerDb(databaseFilePath));
  Producer producer(prefix, suffix, &face, keyChain.get(), testDb);
  Blob contentKey;

  // An initial test to confirm that keys are created for this time slot.
  Name contentKeyName1 = producer.createContentKey
    (testTime1,
     bind(&TestProducer::checkEncryptionKeys, this, _1, testTime1,
          testTimeRounded1, 3, &expressInterestCallCount, &contentKey, cKeyName,
          testDb));

  // Verify that we do not repeat the search for e-keys. The total
  //   expressInterestCallCount should be the same.
  Name contentKeyName2 = producer.createContentKey
    (testTime2,
     bind(&TestProducer::checkEncryptionKeys, this, _1, testTime2,
          testTimeRounded2, 3, &expressInterestCallCount, &contentKey, cKeyName,
          testDb));

  // Confirm content key names are correct
  ASSERT_EQ(cKeyName, contentKeyName1.getPrefix(-1));
  ASSERT_EQ(testTimeRounded1, contentKeyName1.get(6));
  ASSERT_EQ(cKeyName, contentKeyName2.getPrefix(-1));
  ASSERT_EQ(testTimeRounded2, contentKeyName2.get(6));

  // Confirm that produce encrypts with the correct key and has the right name.
  Data testData;
  producer.produce(testData, testTime2, Blob(DATA_CONTENT, sizeof(DATA_CONTENT)));

  const Name& producedName = testData.getName();
  ASSERT_EQ(cKeyName.getPrefix(-1), producedName.getSubName(0, 5));
  ASSERT_EQ(testTimeComponent2, producedName.get(5));
  ASSERT_EQ(Encryptor::getNAME_COMPONENT_FOR(), producedName.get(6));
  ASSERT_EQ(cKeyName, producedName.getSubName(7, 6));
  ASSERT_EQ(testTimeRounded2, producedName.get(13));

  const Blob& dataBlob = testData.getContent();

  EncryptedContent dataContent;
  dataContent.wireDecode(dataBlob);
  const Blob& encryptedData = dataContent.getPayload();
  const Blob& initialVector = dataContent.getInitialVector();

  EncryptParams params(ndn_EncryptAlgorithmType_AesCbc, 16);
  params.setInitialVector(initialVector);
  Blob decryptTest = AesAlgorithm::decrypt(contentKey, encryptedData, params);
  ASSERT_TRUE(decryptTest.equals(Blob(DATA_CONTENT, sizeof(DATA_CONTENT))));
}
Example No. 3
 inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
   if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
     col2im_gpu(col_buff, conv_in_channels_,
         conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
         kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
         pad_.cpu_data()[0], pad_.cpu_data()[1],
         stride_.cpu_data()[0], stride_.cpu_data()[1], data);
   } else {
     col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_,
         conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
         kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(),
         data);
   }
 }
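A CPU counterpart would use the same dispatch: the specialized 2D kernel when possible, the generic N-D kernel otherwise. The sketch below is inferred by analogy with the GPU wrapper above and the conv_im2col_cpu wrapper in Example No. 27; the col2im_cpu/col2im_nd_cpu free functions and the member fields are assumptions carried over from those snippets.

 // Sketch only: CPU analogue of conv_col2im_gpu (not taken from the source).
 inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {
   if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
     col2im_cpu(col_buff, conv_in_channels_,
         conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
         kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
         pad_.cpu_data()[0], pad_.cpu_data()[1],
         stride_.cpu_data()[0], stride_.cpu_data()[1], data);
   } else {
     col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(),
         col_buffer_shape_.data(), kernel_shape_.cpu_data(),
         pad_.cpu_data(), stride_.cpu_data(), data);
   }
 }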
Example No. 4
void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
    // If we have RGB, then we won't have any SkShaders, so there is no need
    // to use a local matrix.
    // TODO: actually only invert if we don't have RGBA.
    SkMatrix localMatrix;
    if (this->usesLocalCoords() && !this->viewMatrix().invert(&localMatrix)) {
        SkDebugf("Cannot invert viewmatrix\n");
        return;
    }

    GrTexture* texture = fFontCache->getTexture(this->maskFormat());
    if (!texture) {
        SkDebugf("Could not allocate backing texture for atlas\n");
        return;
    }

    GrMaskFormat maskFormat = this->maskFormat();

    FlushInfo flushInfo;
    if (this->usesDistanceFields()) {
        flushInfo.fGeometryProcessor.reset(
            this->setupDfProcessor(this->viewMatrix(), fFilteredColor, this->color(), texture));
    } else {
        GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
        flushInfo.fGeometryProcessor.reset(
            GrBitmapTextGeoProc::Create(this->color(),
                                        texture,
                                        params,
                                        maskFormat,
                                        localMatrix,
                                        this->usesLocalCoords()));
    }

    flushInfo.fGlyphsToFlush = 0;
    size_t vertexStride = flushInfo.fGeometryProcessor->getVertexStride();
    SkASSERT(vertexStride == GrAtlasTextBlob::GetVertexStride(maskFormat));

    int glyphCount = this->numGlyphs();
    const GrBuffer* vertexBuffer;

    void* vertices = target->makeVertexSpace(vertexStride,
                                             glyphCount * kVerticesPerGlyph,
                                             &vertexBuffer,
                                             &flushInfo.fVertexOffset);
    flushInfo.fVertexBuffer.reset(SkRef(vertexBuffer));
    flushInfo.fIndexBuffer.reset(target->resourceProvider()->refQuadIndexBuffer());
    if (!vertices || !flushInfo.fVertexBuffer) {
        SkDebugf("Could not allocate vertices\n");
        return;
    }

    unsigned char* currVertex = reinterpret_cast<unsigned char*>(vertices);

    // We cache some values to avoid going to the glyphcache for the same fontScaler twice
    // in a row
    const SkDescriptor* desc = nullptr;
    SkGlyphCache* cache = nullptr;
    GrFontScaler* scaler = nullptr;
    SkTypeface* typeface = nullptr;

    GrBlobRegenHelper helper(this, target, &flushInfo);

    for (int i = 0; i < fGeoCount; i++) {
        const Geometry& args = fGeoData[i];
        Blob* blob = args.fBlob;
        size_t byteCount;
        void* blobVertices;
        int subRunGlyphCount;
        blob->regenInBatch(target, fFontCache, &helper, args.fRun, args.fSubRun, &cache,
                           &typeface, &scaler, &desc, vertexStride, args.fViewMatrix, args.fX,
                           args.fY, args.fColor, &blobVertices, &byteCount, &subRunGlyphCount);

        // now copy all vertices
        memcpy(currVertex, blobVertices, byteCount);

#ifdef SK_DEBUG
        // bounds sanity check
        SkRect rect;
        rect.setLargestInverted();
        SkPoint* vertex = (SkPoint*) ((char*)blobVertices);
        rect.growToInclude(vertex, vertexStride, kVerticesPerGlyph * subRunGlyphCount);

        if (this->usesDistanceFields()) {
            args.fViewMatrix.mapRect(&rect);
        }
        // Allow for small numerical error in the bounds.
        SkRect bounds = fBounds;
        bounds.outset(0.001f, 0.001f);
        SkASSERT(bounds.contains(rect));
#endif

        currVertex += byteCount;
    }

    // Make sure to attach the last cache if applicable
    if (cache) {
        SkGlyphCache::AttachCache(cache);
    }
    this->flush(target, &flushInfo);
}
Example No. 5
    void ZimCreator::createClusters(ArticleSource& src, const std::string& tmpfname)
    {
      std::ofstream out(tmpfname.c_str());

      Cluster cluster;
      cluster.setCompression(compression);

      DirentsType::size_type count = 0, progress = 0;
      for (DirentsType::iterator di = dirents.begin(); out && di != dirents.end(); ++di, ++count)
      {
        while (progress < count * 100 / dirents.size() + 1)
        {
          INFO(progress << "% ready");
          progress += 10;
        }

        if (di->isRedirect())
          continue;

        Blob blob = src.getData(di->getAid());
        if (blob.size() > 0)
          isEmpty = false;

        if (di->isCompress())
        {
          di->setCluster(clusterOffsets.size(), cluster.count());
          cluster.addBlob(blob);
          if (cluster.size() >= minChunkSize * 1024)
          {
            log_info("compress cluster with " << cluster.count() << " articles, " << cluster.size() << " bytes; current title \"" << di->getTitle() << '\"');

            clusterOffsets.push_back(out.tellp());
            out << cluster;
            log_debug("cluster compressed");
            cluster.clear();
            cluster.setCompression(compression);
          }
        }
        else
        {
          if (cluster.count() > 0)
          {
            clusterOffsets.push_back(out.tellp());
            cluster.setCompression(compression);
            out << cluster;
            cluster.clear();
            cluster.setCompression(compression);
          }

          di->setCluster(clusterOffsets.size(), cluster.count());
          clusterOffsets.push_back(out.tellp());
          Cluster c;
          c.addBlob(blob);
          c.setCompression(zimcompNone);
          out << c;
        }
      }

      if (cluster.count() > 0)
      {
        clusterOffsets.push_back(out.tellp());
        cluster.setCompression(compression);
        out << cluster;
      }

      if (!out)
        throw std::runtime_error("failed to write temporary cluster file");

      clustersSize = out.tellp();
    }
Example No. 6
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.num(), other.channels(), other.height(), other.width());
}
Example No. 7
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}
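A minimal usage sketch (hypothetical blobs; assumes the caffe headers and glog's CHECK_EQ). ShareData copies the shared pointer to the underlying SyncedMemory, not the elements, which is how in-place computation avoids a buffer copy:

caffe::Blob<float> bottom(1, 3, 8, 8);
caffe::Blob<float> top;
top.ReshapeLike(bottom);  // counts must match or the CHECK above fires
top.ShareData(bottom);    // top now aliases bottom's data buffer
CHECK_EQ(top.cpu_data(), bottom.cpu_data());  // same pointer, no copy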
Example No. 8
Blob CKey::encryptECIES (CKey& otherKey, Blob const& plaintext)
{

    ECIES_ENC_IV_TYPE iv;
    RandomNumbers::getInstance ().fillBytes (iv.begin (), ECIES_ENC_BLK_SIZE);

    ECIES_ENC_KEY_TYPE secret;
    ECIES_HMAC_KEY_TYPE hmacKey;

    getECIESSecret (otherKey, secret, hmacKey);
    ECIES_HMAC_TYPE hmac = makeHMAC (hmacKey, plaintext);
    hmacKey.zero ();

    EVP_CIPHER_CTX ctx;
    EVP_CIPHER_CTX_init (&ctx);

    if (EVP_EncryptInit_ex (&ctx, ECIES_ENC_ALGO, NULL, secret.begin (), iv.begin ()) != 1)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        secret.zero ();
        throw std::runtime_error ("init cipher ctx");
    }

    secret.zero ();

    Blob out (plaintext.size () + ECIES_HMAC_SIZE + ECIES_ENC_KEY_SIZE + ECIES_ENC_BLK_SIZE, 0);
    int len = 0, bytesWritten;

    // output IV
    memcpy (& (out.front ()), iv.begin (), ECIES_ENC_BLK_SIZE);
    len = ECIES_ENC_BLK_SIZE;

    // Encrypt/output HMAC
    bytesWritten = out.capacity () - len;
    assert (bytesWritten > 0);

    if (EVP_EncryptUpdate (&ctx, & (out.front ()) + len, &bytesWritten, hmac.begin (), ECIES_HMAC_SIZE) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to encrypt hmac");
    }

    len += bytesWritten;

    // encrypt/output plaintext
    bytesWritten = out.capacity () - len;
    assert (bytesWritten > 0);

    if (EVP_EncryptUpdate (&ctx, & (out.front ()) + len, &bytesWritten, & (plaintext.front ()), plaintext.size ()) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to encrypt plaintext");
    }

    len += bytesWritten;

    // finalize
    bytesWritten = out.capacity () - len;

    if (EVP_EncryptFinal_ex (&ctx, & (out.front ()) + len, &bytesWritten) < 0)
    {
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("encryption error");
    }

    len += bytesWritten;

    // Output contains: IV, encrypted HMAC, encrypted data, encrypted padding
    assert (len <= (plaintext.size () + ECIES_HMAC_SIZE + (2 * ECIES_ENC_BLK_SIZE)));
    assert (len >= (plaintext.size () + ECIES_HMAC_SIZE + ECIES_ENC_BLK_SIZE)); // IV, HMAC, data
    out.resize (len);
    EVP_CIPHER_CTX_cleanup (&ctx);
    return out;
}
Example No. 9
Blob CKey::decryptECIES (CKey& otherKey, Blob const& ciphertext)
{
    // minimum ciphertext = IV + HMAC + 1 block
    if (ciphertext.size () < ((2 * ECIES_ENC_BLK_SIZE) + ECIES_HMAC_SIZE) )
        throw std::runtime_error ("ciphertext too short");

    // extract IV
    ECIES_ENC_IV_TYPE iv;
    memcpy (iv.begin (), & (ciphertext.front ()), ECIES_ENC_BLK_SIZE);

    // begin decrypting
    EVP_CIPHER_CTX ctx;
    EVP_CIPHER_CTX_init (&ctx);

    ECIES_ENC_KEY_TYPE secret;
    ECIES_HMAC_KEY_TYPE hmacKey;
    getECIESSecret (otherKey, secret, hmacKey);

    if (EVP_DecryptInit_ex (&ctx, ECIES_ENC_ALGO, NULL, secret.begin (), iv.begin ()) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to init cipher");
    }

    // decrypt mac
    ECIES_HMAC_TYPE hmac;
    int outlen = ECIES_HMAC_SIZE;
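    // Note: ECIES_HMAC_SIZE + 1 bytes are fed below, presumably because with
    // padding enabled EVP_DecryptUpdate withholds a trailing block; the extra
    // byte coaxes the full HMAC block out of the decryptor.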

    if ( (EVP_DecryptUpdate (&ctx, hmac.begin (), &outlen,
                             & (ciphertext.front ()) + ECIES_ENC_BLK_SIZE, ECIES_HMAC_SIZE + 1) != 1) || (outlen != ECIES_HMAC_SIZE) )
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to extract hmac");
    }

    // decrypt plaintext (after IV and encrypted mac)
    Blob plaintext (ciphertext.size () - ECIES_HMAC_SIZE - ECIES_ENC_BLK_SIZE);
    outlen = plaintext.size ();

    if (EVP_DecryptUpdate (&ctx, & (plaintext.front ()), &outlen,
                           & (ciphertext.front ()) + ECIES_ENC_BLK_SIZE + ECIES_HMAC_SIZE + 1,
                           ciphertext.size () - ECIES_ENC_BLK_SIZE - ECIES_HMAC_SIZE - 1) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("unable to extract plaintext");
    }

    // decrypt padding
    int flen = 0;

    if (EVP_DecryptFinal (&ctx, & (plaintext.front ()) + outlen, &flen) != 1)
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("plaintext had bad padding");
    }

    plaintext.resize (flen + outlen);

    // verify integrity
    if (hmac != makeHMAC (hmacKey, plaintext))
    {
        secret.zero ();
        hmacKey.zero ();
        EVP_CIPHER_CTX_cleanup (&ctx);
        throw std::runtime_error ("plaintext had bad hmac");
    }

    secret.zero ();
    hmacKey.zero ();

    EVP_CIPHER_CTX_cleanup (&ctx);
    return plaintext;
}
Example No. 10
double
MaxY::operator()(Blob &blob) const
{ 
  const Rectangle& rect = blob.bounding_rect();
  return static_cast<double>(rect.origin().y() + rect.height());
}
Example No. 11
ThreadableWebSocketChannel::SendResult WebSocketChannel::send(const Blob& binaryData)
{
    LOG(Network, "WebSocketChannel %p send() Sending Blob '%s'", this, binaryData.url().elidedString().utf8().data());
    enqueueBlobFrame(WebSocketFrame::OpCodeBinary, binaryData);
    return ThreadableWebSocketChannel::SendSuccess;
}
Example No. 12
double
MinY::operator()(Blob &blob) const
{ 
  return static_cast<double>(blob.bounding_rect().origin().y());
}
Example No. 13
double
Perimeter::operator()(Blob &blob) const
{ 
  return blob.perimeter();
}
Example No. 14
double
Area::operator()(Blob &blob) const
{ 
  return blob.area();
}
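These one-measure functors (Area, Perimeter, MinY, MaxY) compose naturally with STL algorithms. A hypothetical sketch, assuming <algorithm> is included and a std::vector<Blob> named blobs exists:

Area area;
// Find the blob with the largest area; the lambda adapts the functor to
// the binary comparator that std::max_element expects.
std::vector<Blob>::iterator largest = std::max_element(
    blobs.begin(), blobs.end(),
    [&area](Blob& a, Blob& b) { return area(a) < area(b); });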
Example No. 15
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}
Example No. 16
    WriteMethod ExifParser::encode(
              Blob&     blob,
        const byte*     pData,
              uint32_t  size,
              ByteOrder byteOrder,
        const ExifData& exifData
    )
    {
        ExifData ed = exifData;

        // Delete IFD0 tags that are "not recorded" in compressed images
        // Reference: Exif 2.2 specs, 4.6.8 Tag Support Levels, section A
        static const char* filteredIfd0Tags[] = {
            "Exif.Image.PhotometricInterpretation",
            "Exif.Image.StripOffsets",
            "Exif.Image.RowsPerStrip",
            "Exif.Image.StripByteCounts",
            "Exif.Image.JPEGInterchangeFormat",
            "Exif.Image.JPEGInterchangeFormatLength",
            "Exif.Image.SubIFDs"
        };
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfd0Tags); ++i) {
            ExifData::iterator pos = ed.findKey(ExifKey(filteredIfd0Tags[i]));
            if (pos != ed.end()) {
#ifdef DEBUG
                std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
                ed.erase(pos);
            }
        }

        // Delete IFDs which do not occur in JPEGs
        static const IfdId filteredIfds[] = {
            subImage1Id,
            subImage2Id,
            subImage3Id,
            subImage4Id,
            panaRawIfdId,
            ifd2Id
        };
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredIfds); ++i) {
#ifdef DEBUG
            std::cerr << "Warning: Exif IFD " << filteredIfds[i] << " not encoded\n";
#endif
            eraseIfd(ed, filteredIfds[i]);
        }

        // IPTC and XMP are stored elsewhere, not in the Exif APP1 segment.
        const IptcData emptyIptc;
        const XmpData  emptyXmp;

        // Encode and check if the result fits into a JPEG Exif APP1 segment
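        // (The limit below: a JPEG segment payload is at most 0xFFFF - 2 =
        // 65533 bytes, and the APP1 segment begins with the 6-byte "Exif\0\0"
        // signature, leaving 65527 bytes for the TIFF structure.)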
        std::auto_ptr<TiffHeaderBase> header(new TiffHeader(byteOrder));
        WriteMethod wm = TiffParserWorker::encode(blob,
                                                  pData,
                                                  size,
                                                  ed,
                                                  emptyIptc,
                                                  emptyXmp,
                                                  Tag::root,
                                                  TiffMapping::findEncoder,
                                                  header.get());
        if (blob.size() <= 65527) return wm;

        // If it doesn't fit, remove additional tags
        blob.clear();

        // Delete preview tags if the preview is larger than 32kB.
        // Todo: Enhance preview classes to be able to write and delete previews and use that instead.
        // Table must be sorted by preview, the first tag in each group is the size
        static const PreviewTags filteredPvTags[] = {
            { pttLen, "Exif.Minolta.ThumbnailLength"                  },
            { pttTag, "Exif.Minolta.ThumbnailOffset"                  },
            { pttLen, "Exif.Minolta.Thumbnail"                        },
            { pttLen, "Exif.NikonPreview.JPEGInterchangeFormatLength" },
            { pttIfd, "NikonPreview"                                  },
            { pttLen, "Exif.Olympus.ThumbnailLength"                  },
            { pttTag, "Exif.Olympus.ThumbnailOffset"                  },
            { pttLen, "Exif.Olympus.ThumbnailImage"                   },
            { pttLen, "Exif.Olympus.Thumbnail"                        },
            { pttLen, "Exif.Olympus2.ThumbnailLength"                 },
            { pttTag, "Exif.Olympus2.ThumbnailOffset"                 },
            { pttLen, "Exif.Olympus2.ThumbnailImage"                  },
            { pttLen, "Exif.Olympus2.Thumbnail"                       },
            { pttLen, "Exif.OlympusCs.PreviewImageLength"             },
            { pttTag, "Exif.OlympusCs.PreviewImageStart"              },
            { pttTag, "Exif.OlympusCs.PreviewImageValid"              },
            { pttLen, "Exif.Pentax.PreviewLength"                     },
            { pttTag, "Exif.Pentax.PreviewOffset"                     },
            { pttTag, "Exif.Pentax.PreviewResolution"                 },
            { pttLen, "Exif.Thumbnail.StripByteCounts"                },
            { pttIfd, "Thumbnail"                                     },
            { pttLen, "Exif.Thumbnail.JPEGInterchangeFormatLength"    },
            { pttIfd, "Thumbnail"                                     }
        };
        bool delTags = false;
        ExifData::iterator pos;
        for (unsigned int i = 0; i < EXV_COUNTOF(filteredPvTags); ++i) {
            switch (filteredPvTags[i].ptt_) {
            case pttLen:
                delTags = false;
                pos = ed.findKey(ExifKey(filteredPvTags[i].key_));
                if (pos != ed.end() && sumToLong(*pos) > 32768) {
                    delTags = true;
#ifndef SUPPRESS_WARNINGS
                    std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
                    ed.erase(pos);
                }
                break;
            case pttTag:
                if (delTags) {
                    pos = ed.findKey(ExifKey(filteredPvTags[i].key_));
                    if (pos != ed.end()) {
#ifndef SUPPRESS_WARNINGS
                        std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
                        ed.erase(pos);
                    }
                }
                break;
            case pttIfd:
                if (delTags) {
#ifndef SUPPRESS_WARNINGS
                    std::cerr << "Warning: Exif IFD " << filteredPvTags[i].key_ << " not encoded\n";
#endif
                    eraseIfd(ed, ExifTags::ifdIdByIfdItem(filteredPvTags[i].key_));
                }
                break;
            }
        }

        // Delete unknown tags larger than 4kB.
        for (ExifData::iterator pos = ed.begin(); pos != ed.end(); ) {
            if (   pos->size() > 4096
                && pos->tagName().substr(0, 2) == "0x") {
#ifndef SUPPRESS_WARNINGS
                std::cerr << "Warning: Exif tag " << pos->key() << " not encoded\n";
#endif
                pos = ed.erase(pos);
            }
            else {
                ++pos;
            }
        }

        // Encode the remaining Exif tags again, don't care if it fits this time
        wm = TiffParserWorker::encode(blob,
                                      pData,
                                      size,
                                      ed,
                                      emptyIptc,
                                      emptyXmp,
                                      Tag::root,
                                      TiffMapping::findEncoder,
                                      header.get());

#ifdef DEBUG
        if (wm == wmIntrusive) {
            std::cerr << "SIZE OF EXIF DATA IS " << std::dec << blob.size() << " BYTES\n";
        }
        else {
            std::cerr << "SIZE DOESN'T MATTER, NON-INTRUSIVE WRITING USED\n";
        }
#endif
        return wm;

    } // ExifParser::encode
Example No. 17
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (num_ != source.num() || channels_ != source.channels() ||
      height_ != source.height() || width_ != source.width()) {
    if (reshape) {
      Reshape(source.num(), source.channels(), source.height(), source.width());
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
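Example No. 1 above exercises both flags; as a quick reference, a hypothetical call site looks like this:

caffe::Blob<float> src(2, 3, 4, 5);
caffe::Blob<float> dst;
// copy_diff selects the diff buffer instead of data; reshape allows dst to
// be resized to src's shape instead of aborting on a size mismatch.
dst.CopyFrom(src, /*copy_diff=*/false, /*reshape=*/true);
dst.CopyFrom(src, /*copy_diff=*/true,  /*reshape=*/false);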
Example No. 18
int main() {
  Blob blob;
  Net ann;

  if( BUILD_BEST_NGRAMS ) {
    ifstream fin(I_F_NAME);
    if( !fin.is_open() ) {
      cout << "Couldn't open input file. Exiting." << endl;
      exit(1);
    } 
    
    // read input file
    blob.readFile(fin);
    fin.close();
    
    // Compute info gain for all ngrams, workhorse function
    blob.IG();
    
    // write out best ngrams
    ofstream fout(O_F_NAME);
    if( !fout.is_open() ) {
      cout << "Couldn't open output file. Exiting." << endl;
      exit(1);
    }
    blob.writeBest(fout);
    fout.close();
  }
  
  ifstream fin2(O_F_NAME);
  if( !fin2.is_open() ) {
    cout << "Couldn't open best grams file. Exiting." << endl;
    exit(1);
  }
  

  // Read best grams into nn object
  ann.readBestGrams(fin2);
  fin2.close();

  // read training data
  ifstream fin3(TRAIN_DATA);
  if( !fin3.is_open() ) {
    cout << "Couldn't open training file. Exiting." << endl;
    exit(1);
  }
  
  ann.readTrainingData(fin3);
  fin3.close();

  ann.train();

  ifstream fin4(TEST_DATA);
  if( !fin4.is_open() ) {
    cout << "Couldn't open testing file. Exiting." << endl;
    exit(1);
  }  

  cout << "Testing..." << flush;

  ann.readTestingData(fin4);
  fin4.close();

  ann.test();

  cout << "Done." << endl;
}
Example No. 19
int ex_feature(int argc, char** argv){
	namespace bf=boost::filesystem;
	if (argc < 7){
		LOG(ERROR)<< "Usage: "<<argv[0]<<" pretrained_net_param feature_extraction_proto_file extract_feature_blob_name filelist meanfile mode";
		return 1;
	}
	int mode = atoi(argv[6]);
	if(mode == 1){
		LOG(ERROR) << "Using CPU";
		Caffe::set_mode(Caffe::CPU);
	}else{
		//using gpu
		LOG(ERROR)<< "Using GPU";
		uint device_id = 0;
		LOG(ERROR) << "Using Device_id=" << device_id;
		Caffe::SetDevice(device_id);
		Caffe::set_mode(Caffe::GPU);
	}
	
	Caffe::set_phase(Caffe::TEST);
	string extract_feature_blob_name=argv[3];
	//string svm_model = argv[3];
	string tst_filelist=argv[4];
	string mean_file = argv[5];
	//string save_path = argv[6];
	LOG(ERROR) << "load cnn model";
	shared_ptr<Net<Dtype> > feature_extraction_net(new Net<Dtype>(argv[2]));
	feature_extraction_net->CopyTrainedLayersFrom(argv[1]);
	//shared_ptr<Blob<Dtype> > feature_blob=feature_extraction_net->blob_by_name(extract_feature_blob_name);
	int layerIdx = feature_extraction_net->layerIdx_by_name(extract_feature_blob_name);
	if(layerIdx == -1){
		LOG(ERROR) << "Can't find layer:" << extract_feature_blob_name;
		return 1;
	}else{
		LOG(ERROR) << "LayerIdx:" << layerIdx << " continue...";
	}
	
	vector<vector<Blob<Dtype>*> >& top_vecs = feature_extraction_net->top_vecs();
	shared_ptr<Blob<Dtype> >  feature_blob(top_vecs[layerIdx][0]);
	shared_ptr<Blob<Dtype> > data_blob = feature_extraction_net->blob_by_name("data");
	LOG(ERROR) << "batch size:" << data_blob->num();
	int batch_size = data_blob->num();
	int channels = data_blob->channels();
	int height = data_blob->height();
	int width = data_blob->width();
	CHECK_EQ(height, width);
	int crop_size = height;
	//LOG(ERROR) << 
	//return 1;
	vector<string> images;
	if(!readFromFile(tst_filelist, images)){
		std::cout<< "parse Data Done." << std::endl;
	}else{
		std::cout<<"parse Data failed."<<std::endl;
		return 1;
	}
	Blob<Dtype> data_mean;
	//std::string mean_file = argv[5];
	BlobProto blob_proto;
	std::cout << "reading data_mean from " << mean_file << std::endl;
	ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
	data_mean.FromProto(blob_proto);
	cv::Mat mat_mean = Blob2Mat<Dtype>(data_mean);
	CHECK_EQ(data_mean.num(), 1);
	CHECK_EQ(data_mean.width(), data_mean.height());
	CHECK_EQ(data_mean.channels(), 3);
	std::cout << "prepare parameters" << std::endl;

	
	float scale = 1.0;	
	//bf::path output_path(save_path);
	Blob<Dtype>* bottom = new Blob<Dtype>(batch_size, 3, crop_size, crop_size);
	vector<Blob<Dtype>*> bottomV;
	bottomV.push_back(bottom);
	int numCaches = ceil(float(images.size()) / batch_size);
	Dtype* feature_blob_data;
	Dtype* im_blob_ori;
	int num=0;
	int startIdx = 0;
	//bf::path ftrfile = output_path;
	//ftrfile.replace_extension(".ftr");
	//std::ofstream fo(ftrfile.string().c_str());
	bool multivew = false;
	LOG(ERROR) << "cachesize:" << batch_size << " numCaches:" << numCaches;
	clock_t start_processing, end_processing;
	start_processing = clock();
	for(int cacheIdx = 0;cacheIdx < numCaches;cacheIdx++){
		LOG(ERROR) << "processing:" << cacheIdx << "/" << numCaches;
		vector< vector<Dtype> > cache;
		//vector< vector<Dtype> > resultcache;
		clock_t start_cache, end_cache;
		start_cache = clock();
		vector<vector<int> > img_size;
		readImagesToCache(cache, images, crop_size, mat_mean, batch_size, &startIdx, &num, scale, multivew, img_size);
		end_cache = clock();
		LOG(ERROR) << "readImageToCache:" << (end_cache-start_cache) << "ms";
		start_cache = clock();
		int nBatches = ceil(float(cache.size()) / batch_size);
		//LOG(ERROR) << "nBatches:"<< nBatches << " cache:" << cache.size();
		assert(img_size.size() == nBatches);
		for(int batchIdx = 0;batchIdx < nBatches; batchIdx++){
			time_t start_epoch, end_epoch;
			start_epoch = time(NULL);
			LOG(ERROR) << "ResetLayer: height" <<img_size[batchIdx][0] << " width:" << img_size[batchIdx][1] ;
			bottom->Reshape(bottom->num(), bottom->channels(), img_size[batchIdx][0], img_size[batchIdx][1]);
			feature_extraction_net->ResetLayers(layerIdx, img_size[batchIdx][0], img_size[batchIdx][1]);
			int n=readImagesToBlob(*bottom, cache, batch_size, batchIdx);
			float loss = 0.0;
			LOG(ERROR) << "forward";
			const vector<Blob<Dtype>*>& result =  feature_extraction_net->Forward(bottomV, layerIdx, &loss);
			//SetTopAct<Dtype>(feature_blob);
			//int height_t = feature_blob->height();
			//int width_t = feature_blob->width();
			//LOG(ERROR) << "feature_blob:" << height_t << " " << width_t;
			//for(int hh = 0;hh < 3;hh++){
			//	for(int ww = 0;ww<3;++ww){
			//int hh=0, ww=0;
			LOG(ERROR) << "Enter coordinate to reconstruct and -1 to processing next picture";
			/*while(1){
					std::cout << "Input the position to be reconstruct (x y):";
					std::cin >> hh;
					if (hh == -1){
						break;
					}
					std::cin >> ww;
					feature_extraction_net->ReSetActions(layerIdx);
					SetTop(feature_blob, hh, ww);
					//SetTopAct<Dtype>(feature_blob);
					feature_extraction_net->Deconv(layerIdx);
					//return 1;
					int feature_num = feature_blob->num();
					int feature_dim = feature_blob->count() / feature_num;
					int start_idx=batch_size*batchIdx;
					for(int s=0;s<n;++s){
						feature_blob_data = data_blob->mutable_cpu_diff()+data_blob->offset(s);
						im_blob_ori = data_blob->mutable_cpu_data() + data_blob->offset(s);
						//vector<Dtype> result_t;
						show_reconstruct_image<Dtype>(feature_blob_data, im_blob_ori, data_blob->count() / data_blob->num(), data_blob->height(), data_blob->width(), data_blob->channels());
						for(int d=0;d<feature_dim;++d){
							result_t.push_back(feature_blob_data[d]);
						}
						resultcache.push_back(result_t);
					}
			}*/

				//}
			//}
			show_reconstruct_image_for_position<Dtype>(feature_extraction_net, feature_blob, data_blob, layerIdx, n);
			end_epoch = time(NULL);
			LOG(ERROR) << "forward batch(" << batch_size << "):" << difftime(end_epoch,start_epoch) << "s";
			//LOG(ERROR) << "BatchIdx:" << batchIdx << " n:" << n << " resultcache:" << resultcache.size();
		}
		end_cache = clock();
		LOG(ERROR) << "forward cache:" << end_cache-start_cache << "ms";

		//return 1;
		/*
		int imgIdx = startIdx - num;
		for(int s=0;s<num;++s){
			if(multivew){
				fo << images[imgIdx+s] << " " << 9*2 << " " << resultcache[0].size() << std::endl;
				for(int m=0;m<9*2;++m){
					vector<Dtype>& ftr=resultcache[s*9*2+m];
					for(int d=0;d<ftr.size()-1;++d){
						fo << ftr[d] << " ";
					}
					fo << ftr[ftr.size()-1] << std::endl;
				}
			}else{
				fo << images[imgIdx+s] << " " << 1 << " " << resultcache[0].size() << std::endl;
				vector<Dtype>& ftr=resultcache[s];
				for(int d=0;d<ftr.size()-1;++d){
						fo << ftr[d] << " ";
					//show_reconstruct_image(ftr, 
				}
				fo << ftr[ftr.size()-1] << std::endl;
			}
		}*/
	}
	//fo.close();
	end_processing = clock();
	LOG(ERROR) << "total time:" << float(end_processing-start_processing)/CLOCKS_PER_SEC << "s";
	delete bottom;
	return 0;
}
Example No. 20
template <typename Dtype>
void SliceLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (!propagate_down[0]) { return; }
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      caffe_copy(blob->count(), top_diff,
                 bottom_diff + (*bottom)[0]->offset(offset_num));
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* blob = top[i];
      const Dtype* top_diff = blob->cpu_diff();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, top_diff + blob->offset(n),
                   bottom_diff + (*bottom)[0]->offset(n, offset_channel));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
Example No. 21
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}
Example No. 22
template <typename Dtype>
void SliceLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  if (slice_dim_ == 0) {
    int offset_num = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num),
                 top_data);
      offset_num += blob->num();
    }
  } else if (slice_dim_ == 1) {
    int offset_channel = 0;
    for (int i = 0; i < top->size(); ++i) {
      Blob<Dtype>* blob = (*top)[i];
      Dtype* top_data = blob->mutable_cpu_data();
      const int num_elem = blob->channels() * blob->height() * blob->width();
      for (int n = 0; n < num_; ++n) {
        caffe_copy(num_elem, bottom_data + bottom[0]->offset(n, offset_channel),
                   top_data + blob->offset(n));
      }
      offset_channel += blob->channels();
    }
  }  // slice_dim_ is guaranteed to be 0 or 1 by SetUp.
}
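Both the forward and backward slice paths lean on Blob::offset for their index arithmetic. For the legacy 4-axis layout used here, that offset is the usual row-major formula (a sketch, minus the bounds checks the real accessor performs):

// Row-major linear index of element (n, c, h, w) in an N x C x H x W blob.
inline int offset(const int n, const int c = 0, const int h = 0,
                  const int w = 0) const {
  return ((n * channels() + c) * height() + h) * width() + w;
}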
Example No. 23
void
accountTxPage (
    DatabaseCon& connection,
    AccountIDCache const& idCache,
    std::function<void (std::uint32_t)> const& onUnsavedLedger,
    std::function<void (std::uint32_t,
                        std::string const&,
                        Blob const&,
                        Blob const&)> const& onTransaction,
    AccountID const& account,
    std::int32_t minLedger,
    std::int32_t maxLedger,
    bool forward,
    Json::Value& token,
    int limit,
    bool bAdmin,
    std::uint32_t page_length)
{
    bool lookingForMarker = token.isObject();

    std::uint32_t numberOfResults;

    if (limit <= 0 || (limit > page_length && !bAdmin))
        numberOfResults = page_length;
    else
        numberOfResults = limit;

    // As an account can have many thousands of transactions, there is a limit
    // placed on the number of transactions returned. If the limit is reached
    // before the result set has been exhausted (we always query for one more
    // than the limit), then we return an opaque marker that can be supplied in
    // a subsequent query.
    std::uint32_t queryLimit = numberOfResults + 1;
    std::uint32_t findLedger = 0, findSeq = 0;

    if (lookingForMarker)
    {
        try
        {
            if (!token.isMember(jss::ledger) || !token.isMember(jss::seq))
                return;
            findLedger = token[jss::ledger].asInt();
            findSeq = token[jss::seq].asInt();
        }
        catch (std::exception const&)
        {
            return;
        }
    }

    // We're using the token reference both for passing inputs and outputs, so
    // we need to clear it in between.
    token = Json::nullValue;

    static std::string const prefix (
        R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,
          Status,RawTxn,TxnMeta
          FROM AccountTransactions INNER JOIN Transactions
          ON Transactions.TransID = AccountTransactions.TransID
          AND AccountTransactions.Account = '%s' WHERE
          )");

    std::string sql;

    // SQL's BETWEEN uses a closed interval ([a,b])

    if (forward && (findLedger == 0))
    {
        sql = boost::str (boost::format(
            prefix +
            (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u'
             ORDER BY AccountTransactions.LedgerSeq ASC,
             AccountTransactions.TxnSeq ASC
             LIMIT %u;)"))
            % idCache.toBase58(account)
            % minLedger
            % maxLedger
            % queryLimit);
    }
    else if (forward && (findLedger != 0))
    {
        auto b58acct = idCache.toBase58(account);
        sql = boost::str (boost::format(
            (R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,
            Status,RawTxn,TxnMeta
            FROM AccountTransactions, Transactions WHERE
            (AccountTransactions.TransID = Transactions.TransID AND
            AccountTransactions.Account = '%s' AND
            AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u')
            OR
            (AccountTransactions.TransID = Transactions.TransID AND
            AccountTransactions.Account = '%s' AND
            AccountTransactions.LedgerSeq = '%u' AND
            AccountTransactions.TxnSeq >= '%u')
            ORDER BY AccountTransactions.LedgerSeq ASC,
            AccountTransactions.TxnSeq ASC
            LIMIT %u;
            )"))
        % b58acct
        % (findLedger + 1)
        % maxLedger
        % b58acct
        % findLedger
        % findSeq
        % queryLimit);
    }
    else if (!forward && (findLedger == 0))
    {
        sql = boost::str (boost::format(
            prefix +
            (R"(AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u'
             ORDER BY AccountTransactions.LedgerSeq DESC,
             AccountTransactions.TxnSeq DESC
             LIMIT %u;)"))
            % idCache.toBase58(account)
            % minLedger
            % maxLedger
            % queryLimit);
    }
    else if (!forward && (findLedger != 0))
    {
        auto b58acct = idCache.toBase58(account);
        sql = boost::str (boost::format(
            (R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq,
            Status,RawTxn,TxnMeta
            FROM AccountTransactions, Transactions WHERE
            (AccountTransactions.TransID = Transactions.TransID AND
            AccountTransactions.Account = '%s' AND
            AccountTransactions.LedgerSeq BETWEEN '%u' AND '%u')
            OR
            (AccountTransactions.TransID = Transactions.TransID AND
            AccountTransactions.Account = '%s' AND
            AccountTransactions.LedgerSeq = '%u' AND
            AccountTransactions.TxnSeq <= '%u')
            ORDER BY AccountTransactions.LedgerSeq DESC,
            AccountTransactions.TxnSeq DESC
            LIMIT %u;
            )"))
            % b58acct
            % minLedger
            % (findLedger - 1)
            % b58acct
            % findLedger
            % findSeq
            % queryLimit);
    }
    else
    {
        assert (false);
        // sql is empty
        return;
    }

    {
        auto db (connection.checkoutDb());

        Blob rawData;
        Blob rawMeta;

        boost::optional<std::uint64_t> ledgerSeq;
        boost::optional<std::uint32_t> txnSeq;
        boost::optional<std::string> status;
        soci::blob txnData (*db);
        soci::blob txnMeta (*db);
        soci::indicator dataPresent, metaPresent;

        soci::statement st = (db->prepare << sql,
            soci::into (ledgerSeq),
            soci::into (txnSeq),
            soci::into (status),
            soci::into (txnData, dataPresent),
            soci::into (txnMeta, metaPresent));

        st.execute ();

        while (st.fetch ())
        {
            if (lookingForMarker)
            {
                if (findLedger == ledgerSeq.value_or (0) &&
                    findSeq == txnSeq.value_or (0))
                {
                    lookingForMarker = false;
                }
            }
            else if (numberOfResults == 0)
            {
                token = Json::objectValue;
                token[jss::ledger] = rangeCheckedCast<std::uint32_t>(ledgerSeq.value_or (0));
                token[jss::seq] = txnSeq.value_or (0);
                break;
            }

            if (!lookingForMarker)
            {
                if (dataPresent == soci::i_ok)
                    convert (txnData, rawData);
                else
                    rawData.clear ();

                if (metaPresent == soci::i_ok)
                    convert (txnMeta, rawMeta);
                else
                    rawMeta.clear ();

                // Work around a bug that could leave the metadata missing
                if (rawMeta.size() == 0)
                    onUnsavedLedger(ledgerSeq.value_or (0));

                onTransaction(rangeCheckedCast<std::uint32_t>(ledgerSeq.value_or (0)),
                    *status, rawData, rawMeta);
                --numberOfResults;
            }
        }
    }

    return;
}
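The marker protocol described in the comments above implies a simple caller-side loop. A hypothetical sketch (the callbacks, page length, and ledger bounds are assumptions):

// Page through an account's transactions, feeding the opaque marker back
// in until no more is returned (token reverts to null inside the call).
Json::Value token;  // null on the first call: start from the beginning
do
{
    accountTxPage (connection, idCache, onUnsavedLedger, onTransaction,
                   account, minLedger, maxLedger, /*forward*/ true,
                   token, /*limit*/ 200, /*bAdmin*/ false, pageLength);
}
while (token.isObject ());  // an object token means more results remain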
Example No. 24
template <typename TypeParam>
void HDF5OutputLayerTest<TypeParam>::CheckBlobEqual(const Blob<Dtype>& b1,
                                                    const Blob<Dtype>& b2) {
  EXPECT_EQ(b1.num(), b2.num());
  EXPECT_EQ(b1.channels(), b2.channels());
  EXPECT_EQ(b1.height(), b2.height());
  EXPECT_EQ(b1.width(), b2.width());
  for (int_tp n = 0; n < b1.num(); ++n) {
    for (int_tp c = 0; c < b1.channels(); ++c) {
      for (int_tp h = 0; h < b1.height(); ++h) {
        for (int_tp w = 0; w < b1.width(); ++w) {
          EXPECT_EQ(b1.data_at(n, c, h, w), b2.data_at(n, c, h, w));
        }
      }
    }
  }
}
Example No. 25
ThreadableWebSocketChannel::SendResult WorkerThreadableWebSocketChannel::Bridge::send(const Blob& binaryData)
{
    if (!m_workerClientWrapper)
        return ThreadableWebSocketChannel::SendFail;
    ASSERT(m_peer);
    setMethodNotCompleted();
    m_loaderProxy.postTaskToLoader(createCallbackTask(&WorkerThreadableWebSocketChannel::mainThreadSendBlob, AllowCrossThreadAccess(m_peer), binaryData.url(), binaryData.type(), binaryData.size()));
    RefPtr<Bridge> protect(this);
    waitForMethodCompletion();
    ThreadableWebSocketChannelClientWrapper* clientWrapper = m_workerClientWrapper.get();
    if (!clientWrapper)
        return ThreadableWebSocketChannel::SendFail;
    return clientWrapper->sendRequestResult();
}
Example No. 26
bool RippleAddress::signatureIsCanonical(Blob const& vchSig)
{
    return crypto_sign_check_S_lt_l(
        ((const unsigned char*) vchSig.data ()) + 32
        ) == 0;
}
Example No. 27
 // wrap im2col/col2im so we don't have to remember the (long) argument lists
 inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {
   if (!force_nd_im2col_ && num_spatial_axes_ == 2) {
     im2col_cpu(data, conv_in_channels_,
         conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
         kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
         pad_.cpu_data()[0], pad_.cpu_data()[1],
         stride_.cpu_data()[0], stride_.cpu_data()[1], col_buff);
   } else {
     im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(),
         col_buffer_shape_.data(), kernel_shape_.cpu_data(),
         pad_.cpu_data(), stride_.cpu_data(), col_buff);
   }
 }
Example No. 28
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>* layer,
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top,
    int check_bottom, int top_id, int top_data_id, bool element_wise) {
  if (element_wise) {
    CHECK_EQ(0, layer->blobs().size());
    CHECK_LE(0, top_id);
    CHECK_LE(0, top_data_id);
    const int top_count = top[top_id]->count();
    for (int blob_id = 0; blob_id < bottom.size(); ++blob_id) {
      CHECK_EQ(top_count, bottom[blob_id]->count());
    }
  }
  // First, figure out what blobs we need to check against, and zero init
  // parameter blobs.
  vector<Blob<Dtype>*> blobs_to_check;
  vector<bool> propagate_down(bottom.size(), check_bottom == -1);
  for (int i = 0; i < layer->blobs().size(); ++i) {
    Blob<Dtype>* blob = layer->blobs()[i].get();
    caffe_set(blob->count(), static_cast<Dtype>(0), blob->mutable_cpu_diff());
    blobs_to_check.push_back(blob);
  }
  if (check_bottom == -1) {
    for (int i = 0; i < bottom.size(); ++i) {
      blobs_to_check.push_back(bottom[i]);
    }
  } else if (check_bottom >= 0) {
    CHECK_LT(check_bottom, bottom.size());
    blobs_to_check.push_back(bottom[check_bottom]);
    propagate_down[check_bottom] = true;
  }
  CHECK_GT(blobs_to_check.size(), 0) << "No blobs to check.";
  // Compute the gradient analytically using Backward
  Caffe::set_random_seed(seed_);
  // Ignore the loss from the layer (it's just the weighted sum of the losses
  // from the top blobs, whose gradients we may want to test individually).
  layer->Forward(bottom, top);
  // Get additional loss from the objective
  GetObjAndGradient(*layer, top, top_id, top_data_id);
  layer->Backward(top, propagate_down, bottom);
  // Store computed gradients for all checked blobs
  vector<shared_ptr<Blob<Dtype> > >
      computed_gradient_blobs(blobs_to_check.size());
  for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) {
    Blob<Dtype>* current_blob = blobs_to_check[blob_id];
    computed_gradient_blobs[blob_id].reset(new Blob<Dtype>());
    computed_gradient_blobs[blob_id]->ReshapeLike(*current_blob);
    const int count = blobs_to_check[blob_id]->count();
    const Dtype* diff = blobs_to_check[blob_id]->cpu_diff();
    Dtype* computed_gradients =
        computed_gradient_blobs[blob_id]->mutable_cpu_data();
    caffe_copy(count, diff, computed_gradients);
  }
  // Compute derivative of top w.r.t. each bottom and parameter input using
  // finite differencing.
  // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
  for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) {
    Blob<Dtype>* current_blob = blobs_to_check[blob_id];
    const Dtype* computed_gradients =
        computed_gradient_blobs[blob_id]->cpu_data();
    // LOG(ERROR) << "Blob " << blob_id << ": checking "
    //     << current_blob->count() << " parameters.";
    for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
      // For an element-wise layer, we only need to do finite differencing to
      // compute the derivative of top[top_id][top_data_id] w.r.t.
      // bottom[blob_id][i] only for i == top_data_id.  For any other
      // i != top_data_id, we know the derivative is 0 by definition, and simply
      // check that that's true.
      Dtype estimated_gradient = 0;
      Dtype positive_objective = 0;
      Dtype negative_objective = 0;
      if (!element_wise || (feat_id == top_data_id)) {
        // Do finite differencing.
        // Compute loss with stepsize_ added to input.
        current_blob->mutable_cpu_data()[feat_id] += stepsize_;
        Caffe::set_random_seed(seed_);
        layer->Forward(bottom, top);
        positive_objective =
            GetObjAndGradient(*layer, top, top_id, top_data_id);
        // Compute loss with stepsize_ subtracted from input.
        current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
        Caffe::set_random_seed(seed_);
        layer->Forward(bottom, top);
        negative_objective =
            GetObjAndGradient(*layer, top, top_id, top_data_id);
        // Recover original input value.
        current_blob->mutable_cpu_data()[feat_id] += stepsize_;
        estimated_gradient = (positive_objective - negative_objective) /
            stepsize_ / 2.;
      }
      Dtype computed_gradient = computed_gradients[feat_id];
      Dtype feature = current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
      //     << current_blob->cpu_diff()[feat_id];
      if (kink_ - kink_range_ > fabs(feature)
          || fabs(feature) > kink_ + kink_range_) {
        // We check relative accuracy, but for too small values, we threshold
        // the scale factor by 1.
        Dtype scale = std::max<Dtype>(
            std::max<Dtype>(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
        EXPECT_NEAR(computed_gradient, estimated_gradient, threshold_ * scale)
          << "debug: (top_id, top_data_id, blob_id, feat_id)="
          << top_id << "," << top_data_id << "," << blob_id << "," << feat_id
          << "; feat = " << feature
          << "; objective+ = " << positive_objective
          << "; objective- = " << negative_objective;
      }
      // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "computed gradient: " << computed_gradient
      //    << " estimated_gradient: " << estimated_gradient;
    }
  }
}
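The estimate computed in the inner loop above is the standard centered difference f'(x) ≈ (f(x + h) - f(x - h)) / (2h) with h = stepsize_: its truncation error is O(h^2), versus O(h) for a one-sided difference, which is why the objective is evaluated at both x + stepsize_ and x - stepsize_.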
Example No. 29
// --> strIdent: public key, account ID, or regular seed.
// --> bStrict: Only allow account id or public key.
// <-- bIndex: true if iIndex > 0 and used the index.
//
// Returns a Json::objectValue, containing error information if there was one.
Json::Value accountFromString (
    Ledger::ref lrLedger,
    RippleAddress& naAccount,
    bool& bIndex,
    std::string const& strIdent,
    int const iIndex,
    bool const bStrict,
    NetworkOPs& netOps)
{
    RippleAddress   naSeed;

    if (naAccount.setAccountPublic (strIdent) ||
        naAccount.setAccountID (strIdent))
    {
        // Got the account.
        bIndex = false;
        return Json::Value (Json::objectValue);
    }

    if (bStrict)
    {
        auto success = naAccount.setAccountID (
            strIdent, Base58::getBitcoinAlphabet ());
        return rpcError (success ? rpcACT_BITCOIN : rpcACT_MALFORMED);
    }

    // Otherwise, it must be a seed.
    if (!naSeed.setSeedGeneric (strIdent))
        return rpcError (rpcBAD_SEED);

    // We allow the use of the seeds to access #0.
    // This is poor practice and merely for debugging convenience.
    RippleAddress naRegular0Public;
    RippleAddress naRegular0Private;

    auto naGenerator = RippleAddress::createGeneratorPublic (naSeed);

    naRegular0Public.setAccountPublic (naGenerator, 0);
    naRegular0Private.setAccountPrivate (naGenerator, naSeed, 0);

    SLE::pointer sleGen = netOps.getGenerator (
        lrLedger, naRegular0Public.getAccountID ());

    if (sleGen)
    {
        // Found master public key.
        Blob vucCipher = sleGen->getFieldVL (sfGenerator);
        Blob vucMasterGenerator = naRegular0Private.accountPrivateDecrypt (
            naRegular0Public, vucCipher);

        if (vucMasterGenerator.empty ())
            return rpcError (rpcNO_GEN_DECRYPT);

        naGenerator.setGenerator (vucMasterGenerator);
    }
    // Otherwise, if we didn't find a generator map, assume it is a master
    // generator.

    bIndex  = !iIndex;
    naAccount.setAccountPublic (naGenerator, iIndex);

    return Json::Value (Json::objectValue);
}
Example No. 30
ec_key ECDSAPublicKey (Blob const& serialized)
{
    // Use data() rather than &serialized[0] so an empty Blob is well-defined.
    return ECDSAPublicKey (serialized.data (), serialized.size());
}