Example #1
void load_weights_upto(network *net, char *filename, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "r");
    if(!fp) file_error(filename);

    float garbage;
    fread(&garbage, sizeof(float), 1, fp);   /* skip learning rate (see Example #2) */
    fread(&garbage, sizeof(float), 1, fp);   /* skip momentum                       */
    fread(&garbage, sizeof(float), 1, fp);   /* skip decay                          */
    fread(net->seen, sizeof(int), 1, fp);    /* number of images seen so far        */

    int i;
    for(i = 0; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if (l.dontload) continue;
        if(l.type == CONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            if (l.batch_normalize && (!l.dontloadscales)){
                fread(l.scales, sizeof(float), l.n, fp);
                fread(l.rolling_mean, sizeof(float), l.n, fp);
                fread(l.rolling_variance, sizeof(float), l.n, fp);
            }
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_convolutional_layer(l);
            }
#endif
        }
        if(l.type == DECONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_deconvolutional_layer(l);
            }
#endif
        }
        if(l.type == CONNECTED){
            fread(l.biases, sizeof(float), l.outputs, fp);
            fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_connected_layer(l);
            }
#endif
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
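The four header values this variant discards are the same ones Example #2 below reads into named network fields. A minimal sketch, assuming the plain stdio layout implied by the reads above, of the matching writer side of that header (an illustration with a hypothetical helper name, not the repository's save routine):

#include <stdio.h>

/* Write the four-value header that load_weights_upto() expects:
 * three floats (learning rate, momentum, decay) followed by an int counter. */
void save_weights_header(FILE *fp, float learning_rate, float momentum,
                         float decay, int seen)
{
    fwrite(&learning_rate, sizeof(float), 1, fp);
    fwrite(&momentum, sizeof(float), 1, fp);
    fwrite(&decay, sizeof(float), 1, fp);
    fwrite(&seen, sizeof(int), 1, fp);
}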
Example #2
void load_weights_upto(network *net, char *filename, int cutoff)
{
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "r");
    if(!fp) file_error(filename);

    fread(&net->learning_rate, sizeof(float), 1, fp);
    fread(&net->momentum, sizeof(float), 1, fp);
    fread(&net->decay, sizeof(float), 1, fp);
    fread(&net->seen, sizeof(int), 1, fp);

    int i;
    for(i = 0; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_convolutional_layer(l);
            }
#endif
        }
        if(l.type == DECONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_deconvolutional_layer(l);
            }
#endif
        }
        if(l.type == CONNECTED){
            fread(l.biases, sizeof(float), l.outputs, fp);
            fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_connected_layer(l);
            }
#endif
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
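A hypothetical call site for the function above (the filename and layer count are invented for illustration, and `net` is assumed to have been built from a cfg file already): passing net.n as the cutoff restores every layer, while a smaller value restores only the first layers, e.g. when fine-tuning on top of a pretrained backbone.

load_weights_upto(&net, "extraction.weights", net.n);   /* restore all layers          */
load_weights_upto(&net, "extraction.weights", 25);      /* or only the first 25 layers */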
Example #3
connected_layer parse_connected(list *options, size_params params)
{
    int output = option_find_int(options, "output",1);
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);

    connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation);

    char *weights = option_find_str(options, "weights", 0);
    char *biases = option_find_str(options, "biases", 0);
    parse_data(biases, layer.biases, output);
    parse_data(weights, layer.weights, params.inputs*output);
    #ifdef GPU
    if(weights || biases) push_connected_layer(layer);
    #endif
    return layer;
}
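parse_data() is expected to fill a float array from an optional comma-separated string taken from the cfg, doing nothing when the option is absent (both "weights" and "biases" default to 0 above). A minimal sketch of such a helper under that assumption; the library's actual implementation may differ:

#include <stdlib.h>

/* Fill a[0..n-1] with comma-separated floats from `data`; NULL means "no option given". */
void parse_data(char *data, float *a, int n)
{
    if (!data) return;
    int i = 0;
    char *p = data;
    while (i < n && *p) {
        char *end;
        a[i++] = strtof(p, &end);        /* read one value                 */
        if (end == p) break;             /* not a number: stop parsing     */
        p = end;
        if (*p == ',') ++p;              /* step over the separator        */
    }
}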
Example #4
void load_connected_weights(layer l, FILE *fp, int transpose)
{
    fread(l.biases, sizeof(float), l.outputs, fp);
    fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
    if(transpose){
        transpose_matrix(l.weights, l.inputs, l.outputs);
    }
    //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
    //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
    if (l.batch_normalize && (!l.dontloadscales)){
        fread(l.scales, sizeof(float), l.outputs, fp);
        fread(l.rolling_mean, sizeof(float), l.outputs, fp);
        fread(l.rolling_variance, sizeof(float), l.outputs, fp);
        //printf("Scales: %f mean %f variance\n", mean_array(l.scales, l.outputs), variance_array(l.scales, l.outputs));
        //printf("rolling_mean: %f mean %f variance\n", mean_array(l.rolling_mean, l.outputs), variance_array(l.rolling_mean, l.outputs));
        //printf("rolling_variance: %f mean %f variance\n", mean_array(l.rolling_variance, l.outputs), variance_array(l.rolling_variance, l.outputs));
    }
#ifdef GPU
    if(gpu_index >= 0){
        push_connected_layer(l);
    }
#endif
}
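The transpose_matrix() call above reorders the freshly loaded weights when the file stores them transposed. A minimal sketch of an in-place transpose with that signature, assuming a row-major rows x cols layout (an illustration, not necessarily the repository's exact code):

#include <stdlib.h>
#include <string.h>

/* Transpose a rows x cols row-major matrix in place via a temporary buffer. */
void transpose_matrix(float *a, int rows, int cols)
{
    float *t = calloc(rows * cols, sizeof(float));
    int x, y;
    for (x = 0; x < rows; ++x) {
        for (y = 0; y < cols; ++y) {
            t[y * rows + x] = a[x * cols + y];
        }
    }
    memcpy(a, t, rows * cols * sizeof(float));
    free(t);
}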
Example #5
void push_rnn_layer(layer l)
{
    push_connected_layer(*(l.input_layer));
    push_connected_layer(*(l.self_layer));
    push_connected_layer(*(l.output_layer));
}
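Every example on this page ultimately funnels into push_connected_layer(). A minimal sketch of what that GPU push might look like, assuming darknet-style *_gpu device buffers on the layer struct and a cuda_push_array(dst_gpu, src, n) helper that copies n floats host-to-device; both are assumptions about the library, not verified against its source:

#ifdef GPU
void push_connected_layer(layer l)
{
    /* Copy the host-side parameters into their device-side mirrors. */
    cuda_push_array(l.weights_gpu, l.weights, l.inputs * l.outputs);
    cuda_push_array(l.biases_gpu, l.biases, l.outputs);
    if (l.batch_normalize) {
        cuda_push_array(l.scales_gpu, l.scales, l.outputs);
        cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
        cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
    }
}
#endif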
Example #6
void client_update(network net, char *address)
{
    int fd = socket_setup(0);

    struct hostent *hp;     /* host information */
    struct sockaddr_in server;    /* server address */

    /* fill in the server's address and data */
    bzero((char*)&server, sizeof(server));
    server.sin_family = AF_INET;
    server.sin_port = htons(SERVER_PORT);

    /* look up the address of the server given its name */
    hp = gethostbyname(address);
    if (!hp) {
        perror("no such host");
        fprintf(stderr, "could not obtain address of %s\n", address);
        close(fd);
        return;   /* hp is NULL; bail out before it is dereferenced below */
    }

    /* put the host's address into the server address structure */
    memcpy((void *)&server.sin_addr, hp->h_addr_list[0], hp->h_length);
    if (connect(fd, (struct sockaddr *) &server, sizeof(server)) < 0) {
        error("error connecting");
    }

    /* send a message to the server */
    int i;
    //printf("Sending\n");
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].type == CONVOLUTIONAL){
            convolutional_layer layer = net.layers[i];
            write_all(fd, (char*) layer.bias_updates, layer.n*sizeof(float));
            int num = layer.n*layer.c*layer.size*layer.size;
            write_all(fd, (char*) layer.filter_updates, num*sizeof(float));
            memset(layer.bias_updates, 0, layer.n*sizeof(float));
            memset(layer.filter_updates, 0, num*sizeof(float));
        }
        if(net.layers[i].type == CONNECTED){
            connected_layer layer = net.layers[i];
            write_all(fd, (char *)layer.bias_updates, layer.outputs*sizeof(float));
            write_all(fd, (char *)layer.weight_updates, layer.outputs*layer.inputs*sizeof(float));
            memset(layer.bias_updates, 0, layer.outputs*sizeof(float));
            memset(layer.weight_updates, 0, layer.inputs*layer.outputs*sizeof(float));
        }
    }
    //printf("Sent\n");

    for(i = 0; i < net.n; ++i){
        if(net.layers[i].type == CONVOLUTIONAL){
            convolutional_layer layer = net.layers[i];

            read_all(fd, (char*) layer.biases, layer.n*sizeof(float));
            int num = layer.n*layer.c*layer.size*layer.size;
            read_all(fd, (char*) layer.filters, num*sizeof(float));

#ifdef GPU
            push_convolutional_layer(layer);
#endif
        }
        if(net.layers[i].type == CONNECTED){
            connected_layer layer = net.layers[i];

            read_all(fd, (char *)layer.biases, layer.outputs*sizeof(float));
            read_all(fd, (char *)layer.weights, layer.outputs*layer.inputs*sizeof(float));

#ifdef GPU
            push_connected_layer(layer);
#endif
        }
    }
    //printf("Updated\n");
    close(fd);
}
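write_all() and read_all() above have to cope with the fact that a single read() or write() on a socket may transfer fewer bytes than requested. A minimal sketch of such helpers, assuming the (fd, buffer, byte count) signature used above, with error handling reduced to breaking out of the loop:

#include <unistd.h>
#include <sys/types.h>

void write_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while (n < bytes) {
        ssize_t sent = write(fd, buffer + n, bytes - n);
        if (sent <= 0) break;            /* error or closed connection */
        n += (size_t)sent;
    }
}

void read_all(int fd, char *buffer, size_t bytes)
{
    size_t n = 0;
    while (n < bytes) {
        ssize_t got = read(fd, buffer + n, bytes - n);
        if (got <= 0) break;             /* error or EOF */
        n += (size_t)got;
    }
}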