static void test_conv_init()
{
    printf("test_conv_init...");

    unsigned int img_width = 512;
    unsigned int img_height = 512;
    unsigned int bitsperpixel = 24;
    int no_of_layers = 3;
    int max_features = 20;
    int reduction_factor = 4;
    int pooling_factor = 2;
    float error_threshold[] = {0.0, 0.0, 0.0};
    unsigned int random_seed = 648326;
    deeplearn_conv conv;

    assert(conv_init(no_of_layers,
                     img_width, img_height,
                     bitsperpixel/8, max_features,
                     reduction_factor, pooling_factor,
                     &conv, error_threshold,
                     &random_seed) == 0);
    conv_free(&conv);

    printf("Ok\n");
}
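The same init/free pairing can be wrapped with explicit error handling instead of assert. A minimal sketch reusing only the conv_init signature shown above (0 return means success); the make_conv helper name and the header include are assumptions:

#include <stdio.h>
#include "deeplearn_conv.h" /* assumed header declaring deeplearn_conv and conv_init */

/* Hypothetical helper: initialise a network and report failure via a
   return code rather than assert. Parameters mirror the test above. */
static int make_conv(deeplearn_conv * conv)
{
    unsigned int img_width = 512, img_height = 512;
    unsigned int bitsperpixel = 24;
    float error_threshold[] = {0.0f, 0.0f, 0.0f};
    unsigned int random_seed = 648326;

    if (conv_init(3, img_width, img_height,
                  bitsperpixel/8, 20, 4, 2,
                  conv, error_threshold,
                  &random_seed) != 0) {
        fprintf(stderr, "conv_init failed\n");
        return -1;
    }
    return 0;
}

The caller owns the result and releases it with conv_free(conv) when done, exactly as the test does.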
Example #2
int main_conv(int argc, char* argv[])
{
	cmdline(&argc, argv, 4, 4, usage_str, help_str, 0, NULL);

	num_init();

	unsigned int flags = atoi(argv[1]);

	unsigned int N = DIMS;
	long dims[N];
	const complex float* in = load_cfl(argv[2], N, dims);

	long krn_dims[N];
	const complex float* krn = load_cfl(argv[3], N, krn_dims);
	complex float* out = create_cfl(argv[4], N, dims);

	struct conv_plan* plan = conv_plan(N, flags, CONV_CYCLIC, CONV_SYMMETRIC, dims, dims, krn_dims, krn);
	conv_exec(plan, out, in);
	conv_free(plan);

	unmap_cfl(N, dims, out);
	unmap_cfl(N, krn_dims, krn);
	unmap_cfl(N, dims, in);
	exit(0);
}
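Because planning is split from execution, a plan built once can plausibly be reused for several inputs that share the same dimensions and kernel. A sketch under that assumption, keeping the conv_plan/conv_exec/conv_free calls exactly as in main_conv above; the batch_conv name and its arguments are hypothetical:

#include <complex.h>
/* plus the BART header that declares conv_plan/conv_exec/conv_free and
   CONV_CYCLIC/CONV_SYMMETRIC (assumed to be "num/conv.h") */

/* Run the same kernel over several input/output buffer pairs with one plan. */
static void batch_conv(unsigned int N, unsigned int flags,
		       const long dims[N], const long krn_dims[N],
		       const complex float* krn, int n_images,
		       complex float* out_list[], const complex float* in_list[])
{
	struct conv_plan* plan = conv_plan(N, flags, CONV_CYCLIC, CONV_SYMMETRIC,
					   dims, dims, krn_dims, krn);

	for (int i = 0; i < n_images; i++)
		conv_exec(plan, out_list[i], in_list[i]);

	conv_free(plan);
}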
Example #3
static void linop_conv_free(const linop_data_t* _data)
{
	struct conv_data_s* data = CAST_DOWN(conv_data_s, _data);

	conv_free(data->plan);

	free(data);
}
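For context, the matching allocation side might look roughly like the sketch below. Only the plan member of conv_data_s is known from the destructor above; the allocator name is hypothetical, and the linear-operator registration that would install linop_conv_free as the delete callback is omitted.

#include <stdlib.h>

/* Sketch of the allocation that linop_conv_free() undoes. */
static struct conv_data_s* linop_conv_data_alloc(unsigned int N, unsigned int flags,
						 const long dims[N], const long krn_dims[N],
						 const complex float* krn)
{
	struct conv_data_s* data = malloc(sizeof(struct conv_data_s));
	data->plan = conv_plan(N, flags, CONV_CYCLIC, CONV_SYMMETRIC,
			       dims, dims, krn_dims, krn);
	return data;
}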
int main_conv(int argc, char* argv[])
{
	int c;

	while (-1 != (c = getopt(argc, argv, "ah"))) {

		switch (c) {

		case 'h':
			usage(argv[0], stdout);
			help();
			exit(0);

		default:
			usage(argv[0], stderr);
			exit(1);
		}
	}

	if (argc - optind != 4) {

		usage(argv[0], stderr);
		exit(1);
	}

	unsigned int flags = atoi(argv[optind + 0]);

	unsigned int N = DIMS;
	long dims[N];
	const complex float* in = load_cfl(argv[optind + 1], N, dims);

	long krn_dims[N];
	const complex float* krn = load_cfl(argv[optind + 2], N, krn_dims);
	complex float* out = create_cfl(argv[optind + 3], N, dims);

	struct conv_plan* plan = conv_plan(N, flags, CONV_CYCLIC, CONV_SYMMETRIC, dims, dims, krn_dims, krn);
	conv_exec(plan, out, in);
	conv_free(plan);

	unmap_cfl(N, dims, out);
	unmap_cfl(N, krn_dims, krn);
	unmap_cfl(N, dims, in);
	exit(0);
}
static void test_conv_image()
{
    printf("test_conv_image...");

    unsigned int img_width = 10;
    unsigned int img_height = 10;
    unsigned int bitsperpixel = 0;
    int no_of_layers = 3;
    int max_features = 20;
    int reduction_factor = 6;
    int pooling_factor = 2;
    float error_threshold[] = {0.0, 0.0, 0.0};
    unsigned int random_seed = 648326;
    unsigned char * img, * img2;
    deeplearn_conv conv;
    float BPerror = -1;
    char plot_filename[256];
    char plot_title[256];

    /* load image from file */
    assert(deeplearn_read_png_file((char*)"Lenna.png",
                                   &img_width, &img_height,
                                   &bitsperpixel, &img)==0);

    img2 = (unsigned char*)malloc(128*128*3*sizeof(unsigned char));
    assert(img2);
    deeplearn_downsample(img, img_width, img_height,
                         img2, 128, 128);
    free(img);
    img = img2;
    img_width = 128;
    img_height = 128;

    assert(conv_init(no_of_layers,
                     img_width, img_height,
                     bitsperpixel/8, max_features,
                     reduction_factor, pooling_factor,
                     &conv, error_threshold,
                     &random_seed) == 0);

    int conv0_size =
        conv.layer[0].units_across *
        conv.layer[0].units_down * max_features;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < conv0_size; j++) {
            conv.layer[0].convolution[j] = -9999;
        }

        assert(conv_img(img, &conv) == 0);

        /* check that some convolution happened */
        for (int j = 0; j < conv0_size; j++) {
            assert(conv.layer[0].convolution[j] != -9999);
        }

        /* error should be >= 0 */
        assert(conv.BPerror >= 0);
        /* error should be reducing */
        if (i > 0) {
            assert(conv.BPerror < BPerror);
        }
        BPerror = conv.BPerror;
    }

    /* move to the next layer */
    conv.BPerror = -1;
    conv.current_layer++;

    int conv1_size =
        conv.layer[1].units_across *
        conv.layer[1].units_down * max_features;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < conv1_size; j++) {
            conv.layer[1].convolution[j] = -9999;
        }

        assert(conv_img(img, &conv) == 0);

        /* check that some convolution happened */
        for (int j = 0; j < conv1_size; j++) {
            assert(conv.layer[1].convolution[j] != -9999);
        }

        /* error should be >= 0 */
        if (!(conv.BPerror >= 0)) {
            printf("\nBPerror: %f\n",conv.BPerror);
        }
        assert(conv.BPerror >= 0);
        /* error should be reducing */
        if (i > 0) {
            assert(conv.BPerror < BPerror);
        }
        BPerror = conv.BPerror;
    }

    sprintf(plot_filename,"/tmp/%s","libdeep_conv.png");
    sprintf(plot_title,"%s","Convolution Training Error");

    assert(conv_plot_history(&conv, plot_filename,
                             plot_title,
                             1024, 640) == 0);
    conv_free(&conv);
    free(img);

    printf("Ok\n");
}
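Outside the test harness, the same image could be presented in a plain loop until the library signals completion. A sketch distilled from the test above, assuming conv_img keeps updating conv.BPerror and that the library itself advances current_layer and sets training_complete once each layer's error_threshold is met (the test drives current_layer by hand instead):

/* Hypothetical training loop: keep presenting the image until
   training_complete is set, reporting the backprop error each pass. */
while (!conv.training_complete) {

    if (conv_img(img, &conv) != 0)
        break;

    printf("layer %d  BPerror %.4f\n",
           (int)conv.current_layer, conv.BPerror);
}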
static void test_conv_save_load()
{
    printf("test_conv_save_load...");

    unsigned int img_width = 512;
    unsigned int img_height = 512;
    unsigned int bitsperpixel = 24;
    int i, no_of_layers = 3;
    int max_features = 20;
    int reduction_factor = 4;
    int pooling_factor = 2;
    float error_threshold[] = {0.1, 0.2, 0.3};
    float error_threshold2[] = {0.6, 0.7, 0.8};
    unsigned int random_seed = 648326;
    deeplearn_conv conv1;
    deeplearn_conv conv2;
    FILE * fp;
    char filename[256];

    assert(conv_init(no_of_layers,
                     img_width, img_height,
                     bitsperpixel/8, max_features,
                     reduction_factor, pooling_factor,
                     &conv1, error_threshold,
                     &random_seed) == 0);

    sprintf(filename, "/tmp/%s", "libdeep_conv.dat");

    /* save */
    fp = fopen(filename,"w");
    assert(fp);
    assert(conv_save(fp, &conv1) == 0);
    fclose(fp);

    /* set some different values */
    conv2.reduction_factor = 45;
    conv2.pooling_factor = 8;
    conv2.inputs_across = 100;
    conv2.inputs_down = 200;
    conv2.inputs_depth = 16;
    conv2.no_of_layers = 2;
    conv2.max_features = 15;
    memcpy((void*)conv2.error_threshold,
           error_threshold2,
           conv2.no_of_layers*sizeof(float));
    conv2.random_seed = 20313;
    conv2.enable_learning = 0;
    conv2.current_layer = 4577;
    conv2.training_complete = 3;
    conv2.itterations = 642;
    conv2.layer[0].autocoder=NULL;

    /* load */
    fp = fopen(filename,"r");
    assert(fp);
    assert(conv_load(fp, &conv2) == 0);
    fclose(fp);

    /* compare the results */
    assert(conv2.layer[0].autocoder != NULL);
    assert(conv2.layer[0].autocoder->inputs != NULL);
    assert(conv2.layer[0].autocoder->hiddens != NULL);
    assert(conv1.layer[0].autocoder->NoOfInputs ==
           conv2.layer[0].autocoder->NoOfInputs);
    assert(conv1.layer[0].autocoder->NoOfHiddens ==
           conv2.layer[0].autocoder->NoOfHiddens);
    assert(conv1.reduction_factor == conv2.reduction_factor);
    assert(conv1.pooling_factor == conv2.pooling_factor);
    assert(conv1.random_seed != conv2.random_seed);
    assert(conv1.inputs_across == conv2.inputs_across);
    assert(conv1.inputs_down == conv2.inputs_down);
    assert(conv1.inputs_depth == conv2.inputs_depth);
    assert(conv1.max_features == conv2.max_features);
    assert(conv1.no_of_layers == conv2.no_of_layers);
    assert(conv1.enable_learning == conv2.enable_learning);
    assert(conv1.current_layer == conv2.current_layer);
    assert(conv1.training_complete == conv2.training_complete);
    assert(conv1.itterations == conv2.itterations);
    for (i = 0; i < conv1.no_of_layers; i++) {
        for (int j = 0; j < conv1.layer[i].autocoder->NoOfInputs*
                 conv1.layer[i].autocoder->NoOfHiddens; j++) {
            assert(conv1.layer[i].autocoder->weights[j] > -0.3f);
            assert(conv1.layer[i].autocoder->weights[j] < 0.3f);
            assert(conv2.layer[i].autocoder->weights[j] > -0.3f);
            assert(conv2.layer[i].autocoder->weights[j] < 0.3f);
        }
        assert(conv1.error_threshold[i] == conv2.error_threshold[i]);
        if ((conv1.layer[i].autocoder != NULL) &&
            (conv2.layer[i].autocoder != NULL)) {
            assert(autocoder_compare(conv1.layer[i].autocoder,
                                     conv2.layer[i].autocoder) == 0);
        }
        assert(conv1.layer[i].units_across == conv2.layer[i].units_across);
        assert(conv1.layer[i].units_down == conv2.layer[i].units_down);
        assert(conv1.layer[i].pooling_factor == conv2.layer[i].pooling_factor);
    }

    conv_free(&conv1);
    conv_free(&conv2);

    printf("Ok\n");
}
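The save/load round trip exercised by the test can be condensed into one helper. A minimal sketch reusing only the conv_save/conv_load signatures shown above (0 return means success); the conv_roundtrip name is an assumption:

#include <stdio.h>

/* Hypothetical helper: write src to filename, then read it back into dst.
   Returns 0 on success, a negative code for the first failing step. */
static int conv_roundtrip(deeplearn_conv * src, deeplearn_conv * dst,
                          const char * filename)
{
    FILE * fp = fopen(filename, "w");
    if (!fp) return -1;
    int retval = conv_save(fp, src);
    fclose(fp);
    if (retval != 0) return -2;

    fp = fopen(filename, "r");
    if (!fp) return -3;
    retval = conv_load(fp, dst);
    fclose(fp);
    return (retval != 0) ? -4 : 0;
}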