Example #1
File: rtspin_hime.c  Project: felipeqc/semi
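This snippet calibrates a busy-wait loop: it primes the cache with a few warm-up iterations, times a single loop_once() call with wctime() to initialize loop_length, and then refines that estimate with three fine_tune() passes.
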
static void configure_loop(void)
{
	double start;

	/* prime cache */
	loop_once();
	loop_once();
	loop_once();

	/* measure */
	start = wctime();
	loop_once(); /* hope we didn't get preempted  */
	loop_length = wctime();
	loop_length -= start;

	/* fine tune */
	fine_tune(0.1);
	fine_tune(0.1);
	fine_tune(0.1);
}
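
fine_tune()'s body is not part of this excerpt. As a rough sketch only (an assumed implementation, not necessarily what rtspin_hime.c actually does), a refinement step of this shape would re-measure loop_once() and fold the new sample into loop_length with the given weight:

/* hypothetical sketch; the real helper is not shown in this excerpt */
static void fine_tune(double weight)
{
	double start, measured;

	/* re-measure a single iteration */
	start = wctime();
	loop_once();
	measured = wctime() - start;

	/* blend the fresh sample into the running estimate */
	loop_length = (1.0 - weight) * loop_length + weight * measured;
}

Calling it three times with a small weight, as configure_loop() does, damps any one-off measurement noise.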
Example #2
File: sgd_perf.cpp  Project: wichtounet/dll
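This benchmark trains a three-layer dense network (784-500-250-10 with a softmax output) on 10,000 binarized MNIST images using momentum SGD, then dumps DLL's timers and ETL's counters to show where the time went.
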
int main(int /*argc*/, char* /*argv*/ []) {
    // First experiment: Dense - Dense - Dense
    // Current speed on frigg:
    //   ~20 seconds (mkl, default options)
    //   ~13 seconds (mkl-threads, default options)

    auto dataset = mnist::read_dataset_direct<std::vector, etl::dyn_vector<float>>();
    dataset.training_images.resize(10000);
    dataset.training_labels.resize(10000);

    auto n = dataset.training_images.size();
    std::cout << n << " samples to test" << std::endl;

    mnist::binarize_dataset(dataset);

    using dbn_t = dll::dbn_desc<
        dll::dbn_layers<
            dll::dense_layer_desc<28 * 28, 500>::layer_t,
            dll::dense_layer_desc<500, 250>::layer_t,
            dll::dense_layer_desc<250, 10, dll::activation<dll::function::SOFTMAX>>::layer_t>,
        dll::updater<dll::updater_type::MOMENTUM>, dll::batch_size<100>, dll::trainer<dll::sgd_trainer>>::dbn_t;

    auto net = std::make_unique<dbn_t>();

    // Train the network, for the sake of the performance measurement
    net->display();
    net->fine_tune(dataset.training_images, dataset.training_labels, 20);

    std::cout << "DLL Timers" << std::endl;
    dll::dump_timers();

    std::cout << "ETL Counters" << std::endl;
    etl::dump_counters();

    return 0;
}
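
Example #2 measures training speed only and never evaluates on the test split. If a test error were wanted as well, the dll::test_set helper seen in Example #3 could be appended at the end of main(), assuming read_dataset_direct also loaded the test split:

    // hypothetical addition, mirroring Example #3
    auto test_error = dll::test_set(net, dataset.test_images, dataset.test_labels, dll::predictor());
    std::cout << "test_error:" << test_error << std::endl;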
Example #3
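Example #3 shows the classic two-phase DBN workflow: pretrain() performs unsupervised, layer-wise training of the stacked RBMs, after which fine_tune() runs supervised training on the labeled data and returns the final training error.
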
    typedef dll::dbn_desc<
        dll::dbn_layers<
        dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::rbm_t,
        dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::rbm_t,
        dll::rbm_desc<200, 10, dll::momentum, dll::batch_size<25>, dll::hidden<dll::unit_type::SOFTMAX>>::rbm_t>>::dbn_t dbn_t;

    auto dataset = mnist::read_dataset<std::vector, std::vector, double>(500);

    REQUIRE(!dataset.training_images.empty());

    mnist::binarize_dataset(dataset);

    auto dbn = std::make_unique<dbn_t>();

    dbn->pretrain(dataset.training_images, 20);
    auto error = dbn->fine_tune(dataset.training_images, dataset.training_labels, 10, 50);

    REQUIRE(error < 5e-2);

    auto test_error = dll::test_set(dbn, dataset.test_images, dataset.test_labels, dll::predictor());

    std::cout << "test_error:" << test_error << std::endl;

    REQUIRE(test_error < 0.2);
}

TEST_CASE( "dbn/mnist_2", "dbn::containers" ) {
    typedef dll::dbn_desc<
        dll::dbn_layers<
        dll::rbm_desc<28 * 28, 100, dll::momentum, dll::batch_size<25>, dll::init_weights>::rbm_t,
        dll::rbm_desc<100, 200, dll::momentum, dll::batch_size<25>>::rbm_t,
Example #4
File: lstm.cpp  Project: wichtounet/dll
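These unit tests train a small LSTM (28 time steps of 28 features, 75 hidden units) followed by a dense softmax classifier on an MNIST subset; the second test additionally truncates backpropagation through time via dll::truncate<20>.
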
    auto dataset = dll::make_mnist_dataset_nc_sub(0, 2000, dll::batch_size<100>{}, dll::scale_pre<255>{});

    constexpr size_t time_steps      = 28;
    constexpr size_t sequence_length = 28;
    constexpr size_t hidden_units    = 75;

    using network_t = dll::dyn_network_desc<
        dll::network_layers<
            dll::lstm_layer<time_steps, sequence_length, hidden_units, dll::last_only>,
            dll::recurrent_last_layer<time_steps, hidden_units>,
            dll::dense_layer<hidden_units, 10, dll::softmax>
        >
        , dll::updater<dll::updater_type::ADAM>      // Adam
        , dll::batch_size<100>                       // The mini-batch size
    >::network_t;

    auto net = std::make_unique<network_t>();

    REQUIRE(net->fine_tune(dataset.train(), 30) < 0.15);
    REQUIRE(net->evaluate_error(dataset.test()) < 0.25);
}

// Simple LSTM with truncation
TEST_CASE("unit/lstm/2", "[unit][lstm]") {
    auto dataset = dll::make_mnist_dataset_nc_sub(0, 2000, dll::batch_size<100>{}, dll::scale_pre<255>{});

    constexpr size_t time_steps      = 28;
    constexpr size_t sequence_length = 28;
    constexpr size_t hidden_units    = 75;

    using network_t = dll::dyn_network_desc<
        dll::network_layers<
            dll::lstm_layer<time_steps, sequence_length, hidden_units, dll::last_only, dll::truncate<20>>,
            dll::recurrent_last_layer<time_steps, hidden_units>,
Example #5
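Example #5 drives fine-tuning through DLL's in-memory data generators, which batch the images and labels (batch size 25, categorical labels) and scale the pixel values with dll::scale_pre<255>.
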
    using train_generator_t = dll::inmemory_data_generator_desc<dll::batch_size<25>, dll::categorical, dll::scale_pre<255>>;

    auto train_generator = dll::make_generator(
        dataset.training_images, dataset.training_labels,
        dataset.training_images.size(), 10,
        train_generator_t{});

    auto test_generator = dll::make_generator(
        dataset.test_images, dataset.test_labels,
        dataset.test_images.size(), 10,
        train_generator_t{});

    auto dbn = std::make_unique<dbn_t>();

    auto error = dbn->fine_tune(*train_generator, 50);
    std::cout << "error:" << error << std::endl;
    CHECK(error < 5e-2);

    auto test_error = dbn->evaluate_error(*test_generator);
    std::cout << "test_error:" << test_error << std::endl;
    CHECK(test_error < 0.3);
}
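
Note that both generators above share the same descriptor type (train_generator_t); only the data they wrap differs, so the test generator inherits the same batching and scaling behavior.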

// Use a simple in-memory generator for pretraining and fine-tuning
TEST_CASE("unit/augment/mnist/2", "[dbn][unit]") {
    typedef dll::dbn_desc<
        dll::dbn_layers<
            dll::rbm_desc<28 * 28, 200, dll::momentum, dll::batch_size<10>>::layer_t,
            dll::rbm_desc<200, 300, dll::momentum, dll::batch_size<10>>::layer_t,
            dll::rbm_desc<300, 10, dll::momentum, dll::batch_size<10>, dll::hidden<dll::unit_type::SOFTMAX>>::layer_t>,