Example #1
bool hash_search( void )
/*************************/
{
    unsigned i,j;
    bool ok;

    hashsize = num_keywords;
    for( hashmask = 1; hashmask < hashsize; hashmask <<= 1 ) {
        // set hashmask to the smallest power of two >= hashsize
    }
    if( flags.mask_hash ) {
        hashsize = hashmask;
    }
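    /* Exhaustively try (j, i) parameter pairs for hash_func(); if none fits
       and imperfect hashes are allowed, grow the table one slot at a time
       (up to hashmask) and retry. */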
    do {
        for( i = 0; i < max_len; ++i ) {
            for( j = 0; j < max_len + 1; ++j ) {
                ok = hash_func( j, i );
                if( ok ) {
                    if( hashsize == num_keywords ) {
                        output( "Perfect hash function found!\n" );
                    } else {
                        output( "Hash function found.\n" );
                    }
                    if( flags.tiny_output ) {
                        dump_tiny( j, i );
                    } else {
                        dump_weights( j, i );
                        dump_hash();
                    }
                    return( true );
                }
            }
        }
        if( !flags.imperfect )
            break;
        ++hashsize;
    } while( hashsize <= hashmask );
    return( false );
}
Example #2
// ------------------- MAIN ----------------- //
// ------------------------------------------ //
int main(){
    srand((unsigned int)time(NULL));
    
    Vector* training_data[MAX_INPUT_LENGHT];
    Vector* teaching_data[MAX_INPUT_LENGHT];
    for (int i = 0; i < MAX_INPUT_LENGHT; i++) {
        training_data[i] = new_vec(DIMENSION_INPUT+1);
        teaching_data[i] = new_vec(DIMENSION_OUTPUT);
    }
    size_t TRAINING_SET_SIZE = read_input(training_data, teaching_data);
    
    // input dim, output dim, number of hidden layers (2), then the size of each hidden layer (4, 4)
    Network* network = new_network(DIMENSION_INPUT, DIMENSION_OUTPUT, 2, 4, 4);
//    print_network(network);
    
    // Snapshot storage: one weight vector per neuron in every hidden layer and the output layer.
    Vector*** best_weights = malloc((network->hidden_layers_count+1) * sizeof(Vector**));
    for (size_t layer = 0; layer < network->hidden_layers_count; layer++) {
        best_weights[layer] = malloc(network->hidden_layers[layer]->size * sizeof(Vector*));
        for (size_t neuron_id = 0; neuron_id < network->hidden_layers[layer]->size; neuron_id++) {
            best_weights[layer][neuron_id] = new_vec(network->hidden_layers[layer]->neurons[neuron_id]->weights->length);
        }
    }
    best_weights[network->hidden_layers_count] = malloc(network->output_layer->size * sizeof(Vector*));
    for (size_t neuron_id = 0; neuron_id < network->output_layer->size; neuron_id++) {
        best_weights[network->hidden_layers_count][neuron_id] = new_vec(network->output_layer->neurons[neuron_id]->weights->length);
    }
    
    time_t time_at_beginning = time(0);
    
    double total_error_old = DBL_MAX;   // DBL_MAX from <float.h>
    double total_error = 1.0;
    double minimum_error_achieved = DBL_MAX;
    double epsilon = 0.0001;
    size_t epoch_count = 0;
    
    // Train until the total error drops below epsilon or a 30-second wall-clock budget expires.
    while ((time(0) - time_at_beginning) < 30 && (total_error = error_total(network, training_data, teaching_data, TRAINING_SET_SIZE)) > epsilon) {
        if (minimum_error_achieved > total_error) {
            minimum_error_achieved = total_error;
            dump_weights(network, best_weights);
//            print_detailed_layer(network->hidden_layers[1]);
        }
        for (size_t i = 0; i < TRAINING_SET_SIZE; i++) {
            train_network_with_backprop(network, training_data[i], teaching_data[i]);
        }
        
        if (epoch_count % 1000 == 0) {
            
//            printf("Epochs count: %ld\n",epoch_count);
            // If the error has plateaued, perturb the weights to escape the plateau.
            if (fabs(total_error - total_error_old) < 0.001) {
//                printf("Shaking Weights!\n");
                shake_weights(network);
            }
            total_error_old = total_error;
//            printf("Total error: %.15lf\n", total_error);
        }
        update_learning_rate(network, ++epoch_count);
        scramble_data(training_data, teaching_data, TRAINING_SET_SIZE);
    }
    
//    printf("Network training finished with a total error: %.15lf\n", total_error);
//    printf("Network training achieved a minimum total error: %.15lf\n", minimum_error_achieved);
//    print_detailed_layer(network->hidden_layers[1]);
    load_weights(network, best_weights);
//    print_detailed_layer(network->input_layer);
//    print_detailed_layer(network->hidden_layers[0]);
//    print_detailed_layer(network->hidden_layers[1]);
//    print_detailed_layer(network->output_layer);
    test_network(network);
    
    for (size_t layer = 0; layer < network->hidden_layers_count; layer++) {
        for (size_t neuron_id = 0; neuron_id < network->hidden_layers[layer]->size; neuron_id++) {
            delete_vec(best_weights[layer][neuron_id]);
        }
        free(best_weights[layer]);
    }
    for (size_t neuron_id = 0; neuron_id < network->output_layer->size; neuron_id++) {
        delete_vec(best_weights[network->hidden_layers_count][neuron_id]);
    }
    free(best_weights[network->hidden_layers_count]);
    free(best_weights);
    
    delete_network(network);
    
    for (int i = 0; i < MAX_INPUT_LENGHT; i++) {
        delete_vec(training_data[i]);
        delete_vec(teaching_data[i]);
    }
    
    
    return EXIT_SUCCESS;
}
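
Example #2 relies on dump_weights()/load_weights() to snapshot and restore the network's weights, but their definitions are not shown. A minimal sketch consistent with that usage follows; the Vector field name `values` is an assumption (only `length` appears above), as is the exact copy strategy.

/* Sketch only: assumes Vector exposes `length` and a `values` array
   (the `values` name is a guess; the example shows only `length`). */
static void copy_vec(Vector* dst, const Vector* src) {
    for (size_t k = 0; k < src->length; k++)
        dst->values[k] = src->values[k];
}

/* Copy every neuron's weights into the snapshot store. */
void dump_weights(Network* network, Vector*** store) {
    for (size_t layer = 0; layer < network->hidden_layers_count; layer++)
        for (size_t n = 0; n < network->hidden_layers[layer]->size; n++)
            copy_vec(store[layer][n],
                     network->hidden_layers[layer]->neurons[n]->weights);
    for (size_t n = 0; n < network->output_layer->size; n++)
        copy_vec(store[network->hidden_layers_count][n],
                 network->output_layer->neurons[n]->weights);
}

/* load_weights() is the same traversal with the copy_vec arguments swapped. */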
Example #3
// Called at the end of every training epoch: dump the model's current weights.
void lbann_callback_dump_weights::on_epoch_end(model *m) {
  dump_weights(m);
}
Example #4
void lspanssi_quantise(float *x, float *xq, int ndim, int mbest_entries)
{
  int i, j, n1, n2, n3, n4;
  float w[LPC_ORD];
  const float *codebook1 = lsp_cbvqanssi[0].cb;
  const float *codebook2 = lsp_cbvqanssi[1].cb;
  const float *codebook3 = lsp_cbvqanssi[2].cb;
  const float *codebook4 = lsp_cbvqanssi[3].cb;
  struct MBEST *mbest_stage1, *mbest_stage2, *mbest_stage3, *mbest_stage4;
  float target[LPC_ORD];
  int   index[MBEST_STAGES];

  mbest_stage1 = mbest_create(mbest_entries);
  mbest_stage2 = mbest_create(mbest_entries);
  mbest_stage3 = mbest_create(mbest_entries);
  mbest_stage4 = mbest_create(mbest_entries);
  for(i=0; i<MBEST_STAGES; i++)
      index[i] = 0;

  compute_weights_anssi_mode2(x, w, ndim);

  #ifdef DUMP
  dump_weights(w, ndim);
  #endif
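
  /* Four-stage M-best search: each stage quantises the residual left by the
     candidate vectors carried forward from the previous stage. */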

  /* Stage 1 */

  mbest_search(codebook1, x, w, ndim, lsp_cbvqanssi[0].m, mbest_stage1, index);
  mbest_print("Stage 1:", mbest_stage1);

  /* Stage 2 */

  for (j=0; j<mbest_entries; j++) {
      index[1] = n1 = mbest_stage1->list[j].index[0];
      for(i=0; i<ndim; i++)
          target[i] = x[i] - codebook1[ndim*n1+i];
      mbest_search(codebook2, target, w, ndim, lsp_cbvqanssi[1].m, mbest_stage2, index);
  }
  mbest_print("Stage 2:", mbest_stage2);

  /* Stage 3 */

  for (j=0; j<mbest_entries; j++) {
      index[2] = n1 = mbest_stage2->list[j].index[1];
      index[1] = n2 = mbest_stage2->list[j].index[0];
      for(i=0; i<ndim; i++)
          target[i] = x[i] - codebook1[ndim*n1+i] - codebook2[ndim*n2+i];
      mbest_search(codebook3, target, w, ndim, lsp_cbvqanssi[2].m, mbest_stage3, index);
  }
  mbest_print("Stage 3:", mbest_stage3);

  /* Stage 4 */

  for (j=0; j<mbest_entries; j++) {
      index[3] = n1 = mbest_stage3->list[j].index[2];
      index[2] = n2 = mbest_stage3->list[j].index[1];
      index[1] = n3 = mbest_stage3->list[j].index[0];
      for(i=0; i<ndim; i++)
          target[i] = x[i] - codebook1[ndim*n1+i] - codebook2[ndim*n2+i] - codebook3[ndim*n3+i];
      mbest_search(codebook4, target, w, ndim, lsp_cbvqanssi[3].m, mbest_stage4, index);
  }
  mbest_print("Stage 4:", mbest_stage4);

  /* Best overall candidate: its index path is stored most-recent stage first. */
  n1 = mbest_stage4->list[0].index[3];
  n2 = mbest_stage4->list[0].index[2];
  n3 = mbest_stage4->list[0].index[1];
  n4 = mbest_stage4->list[0].index[0];
  for (i=0;i<ndim;i++)
      xq[i] = codebook1[ndim*n1+i] + codebook2[ndim*n2+i] + codebook3[ndim*n3+i] + codebook4[ndim*n4+i];

  mbest_destroy(mbest_stage1);
  mbest_destroy(mbest_stage2);
  mbest_destroy(mbest_stage3);
  mbest_destroy(mbest_stage4);
}
Example #5
// Called once before training starts: dump the weights tagged as "initial".
void lbann_callback_dump_weights::on_train_begin(model *m) {
  dump_weights(m, "initial");
}