/*
 * Directed Hausdorff distance from set `a' to set `b': for every row of
 * `a' find the Euclidean distance to its nearest row in `b', and return
 * the largest of those nearest-neighbour distances.
 *
 * NOTE(review): this is the one-directional (a -> b) variant; it is not
 * symmetric in its arguments.
 */
clann_real_type
metric_hausdorff(const struct matrix *a, const struct matrix *b)
{
    unsigned int row_a, row_b;
    clann_real_type nearest, worst = 0, dist, *pa, *pb;

    for (row_a = 0; row_a < a->rows; row_a++)
    {
        /* INT_MAX is used as "infinity" throughout this file. */
        nearest = (clann_real_type) INT_MAX;
        pa = matrix_value(a, row_a, 0);

        for (row_b = 0; row_b < b->rows; row_b++)
        {
            pb = matrix_value(b, row_b, 0);
            dist = metric_euclidean(pa, pb, a->cols);

            if (dist < nearest)
                nearest = dist;
        }

        if (nearest > worst)
            worst = nearest;
    }

    return worst;
}
/*
 * Counts how many rows of `a' have their nearest neighbour in `b'
 * (Euclidean distance over a->cols coordinates) farther away than
 * `limit'.
 */
unsigned int
metric_hausdorff_limit(const struct matrix *a,
                       const struct matrix *b,
                       clann_real_type limit)
{
    unsigned int row_a, row_b, exceeded = 0;
    clann_real_type nearest, dist, *pa, *pb;

    for (row_a = 0; row_a < a->rows; row_a++)
    {
        /* INT_MAX is used as "infinity" throughout this file. */
        nearest = (clann_real_type) INT_MAX;
        pa = matrix_value(a, row_a, 0);

        for (row_b = 0; row_b < b->rows; row_b++)
        {
            pb = matrix_value(b, row_b, 0);
            dist = metric_euclidean(pa, pb, a->cols);

            if (dist < nearest)
                nearest = dist;
        }

        if (nearest > limit)
            exceeded++;
    }

    return exceeded;
}
/*
 * Batch SOM training: runs from the network's current epoch up to
 * `epochs', presenting every row of `x' in order each pass. Only the
 * neighbourhood width is decayed here — the incremental variant also
 * decays the learning rate.
 */
void
som_train_batch(struct som *ann, struct matrix *x, unsigned int epochs)
{
    clann_real_type *input, *best = NULL;
    unsigned int index;

    while (ann->epoch <= epochs)
    {
        som_adjust_width(ann);

        /* Present each input sample and update the winner's batch. */
        for (index = 0; index < x->rows; index++)
        {
            input = matrix_value(x, index, 0);
            som_find_winner_neuron(ann, input, &best);
            som_batch_adjust_of_weights(ann, input, best);
        }

#if CLANN_VERBOSE
        printf("N. [SOM] Width: "CLANN_PRINTF", Rate: "CLANN_PRINTF" (%d).\n",
               ann->actual_width, ann->actual_learning_rate, ann->epoch);
#endif

        ann->epoch += ann->step;
    }
}
/*
 * For each row of `a', finds the nearest row of `b' over the first
 * `length' coordinates, then compares the angles stored in the last
 * column of each row: counts the rows whose angle cosine-similarity
 * falls below `1 - limit'.
 *
 * Fixes: `angle' was read uninitialized (undefined behavior) whenever
 * `b' had no rows; `count' was a clann_real_type accumulating integer
 * increments but returned as unsigned int — it is now an integer.
 */
unsigned int
metric_hausdorff_angle(const struct matrix *a,
                       const struct matrix *b,
                       clann_real_type limit)
{
    unsigned int i, j, length, count = 0;
    clann_real_type inf, angle, d, *x, *y, a_c[2], b_c[2];

    /* Compare only the common coordinates; the last column is the angle. */
    length = a->cols > b->cols ? b->cols - 1 : a->cols - 1;

    for (i = 0; i < a->rows; i++)
    {
        inf = (clann_real_type) INT_MAX;
        angle = 0; /* defined even if the inner loop never runs */
        x = matrix_value(a, i, 0);

        for (j = 0; j < b->rows; j++)
        {
            y = matrix_value(b, j, 0);
            d = metric_euclidean(x, y, length);

            if (d < inf)
            {
                inf = d;
                angle = y[length];
            }
        }

        /* No rows in `b' means no nearest neighbour to compare with. */
        if (b->rows == 0)
            continue;

        /* Compare orientations via the dot product of unit vectors. */
        a_c[0] = CLANN_COS(x[length]);
        a_c[1] = CLANN_SIN(x[length]);
        b_c[0] = CLANN_COS(angle);
        b_c[1] = CLANN_SIN(angle);

        d = metric_dot_product(a_c, b_c, 2);

        if (d < 1 - limit)
            count++;
    }

    return count;
}
/*
 * Incremental (on-line) SOM weight update: every neuron's weight vector
 * is pulled towards the sample `x', scaled by the current learning rate
 * and by the neuron's neighbourhood proximity to the winning neuron.
 */
void
som_incremental_adjust_of_weights(struct som *ann,
                                  clann_real_type *x,
                                  clann_real_type *winner)
{
    clann_size_type neuron, dim;
    clann_real_type *weights, *position, proximity;

    for (neuron = 0; neuron < ann->grid.n_neurons; neuron++)
    {
        position = matrix_value(&ann->grid.indexes, neuron, 0);
        weights = matrix_value(&ann->grid.weights, neuron, 0);

        proximity = som_compute_neighborhood_distance(ann, position, winner);

        for (dim = 0; dim < ann->input_size; dim++)
            weights[dim] += ann->actual_learning_rate * proximity
                          * (x[dim] - weights[dim]);
    }
}
/*
 * Finds the neuron whose weight vector is closest (Euclidean) to the
 * input `x' and stores a pointer to that neuron's grid index in
 * *winner.
 *
 * NOTE(review): *winner is left untouched when the grid has no neurons
 * — callers appear to pre-initialise it to NULL; confirm.
 */
void
som_find_winner_neuron(struct som *ann, clann_real_type *x,
                       clann_real_type **winner)
{
    clann_real_type *weights, dist;
    clann_real_type best = (clann_real_type) INT_MAX;
    clann_size_type neuron;

    for (neuron = 0; neuron < ann->grid.n_neurons; neuron++)
    {
        weights = matrix_value(&ann->grid.weights, neuron, 0);
        dist = metric_euclidean(x, weights, ann->input_size);

        if (dist < best)
        {
            best = dist;
            *winner = matrix_value(&ann->grid.indexes, neuron, 0);
        }
    }
}
int som_save(struct som *ann, const char *file) { FILE *fd; if ((fd = fopen(file, "w"))) { unsigned int i, j; fprintf(fd, "%s\n", SOM_FILE_HEADER); fprintf(fd, CLANN_SIZE_PRINTF" ", ann->input_size); fprintf(fd, CLANN_SIZE_PRINTF" ", ann->grid.width); fprintf(fd, CLANN_PRINTF" ", ann->learning_rate); fprintf(fd, CLANN_PRINTF" ", ann->const_1); fprintf(fd, CLANN_PRINTF"\n", ann->const_2); for (i = 0; i < ann->grid.weights.rows; i++) { fprintf(fd, CLANN_PRINTF, *matrix_value(&ann->grid.weights, i, 0)); for (j = 1; j < ann->grid.weights.cols; j++) fprintf(fd, " " CLANN_PRINTF, *matrix_value(&ann->grid.weights, i, j)); fprintf(fd, "\n"); } fclose(fd); return 1; } printf("E. [SOM] Can not open/create file to write.\n"); return 0; }
void som_train_incremental(struct som *ann, struct matrix *x, unsigned int epochs) { clann_size_type s, *mess = malloc(sizeof(clann_size_type) * x->rows); clann_real_type *sample, *winner = NULL; /* * Index vector used to shuffle the input presentation sequence */ for (s = 0; s < x->rows; s++) mess[s] = s; while (ann->epoch <= epochs) { clann_shuffle((clann_int_type *) mess, x->rows); som_adjust_width(ann); som_adjust_learning_rate(ann); /* * For each input sample `s' */ for (s = 0; s < x->rows; s++) { sample = matrix_value(x, mess[s], 0); som_find_winner_neuron(ann, sample, &winner); som_incremental_adjust_of_weights(ann, sample, winner); } #if CLANN_VERBOSE printf("N. [SOM] Width: "CLANN_PRINTF", Rate: "CLANN_PRINTF" (%d).\n", ann->actual_width, ann->actual_learning_rate, ann->epoch); #endif ann->epoch += ann->step; } free((void *) mess); }
/*
 * Recursively enumerates every coordinate tuple of the SOM grid
 * (width^dimension combinations). `buffer' accumulates one coordinate
 * per recursion level; when a full tuple is built it is written to row
 * *count of grid.indexes and *count is advanced.
 */
void
som_grid_indexes(struct som *ann,
                 clann_size_type index,
                 clann_real_type *buffer,
                 clann_size_type *count)
{
    unsigned int pos;

    if (index >= ann->grid.dimension)
    {
        /* Base case: a complete tuple sits in `buffer'. */
        for (pos = 0; pos < ann->grid.dimension; pos++)
            *matrix_value(&ann->grid.indexes, *count, pos) = buffer[pos];

        *count += 1;
        return;
    }

    /* Fix coordinate `index' to each grid position and recurse. */
    for (pos = 0; pos < ann->grid.width; pos++)
    {
        buffer[index] = pos;
        som_grid_indexes(ann, index + 1, buffer, count);
    }
}
void main() { char exp[10]; int ssm=0,row=0,col=0; node *temp; // clrscr(); printf("Enter Exp : "); scanf("%s",exp); matrix_value(); while(exp[ssm] != '\0') { if(ssm==0) { tos++; oprate[tos].op_name = exp[tos]; } else { if(isOperator(exp[ssm]) == -1) { oprate[tos].t = (node*) malloc (sizeof(node)); oprate[tos].t->data = exp[ssm]; oprate[tos].t->lptr = '\0'; oprate[tos].t->rptr = '\0'; } else { row = getOperatorPosition(oprate[tos].op_name); col = getOperatorPosition(exp[ssm]); if(matrix[row][col] == 0) { tos++; oprate[tos].op_name = exp[ssm]; } else if(matrix[row][col] == 1) { temp = (node*) malloc (sizeof(node)); temp->data = oprate[tos].op_name; temp->lptr = (oprate[tos-1].t); temp->rptr = (oprate[tos].t); tos--; oprate[tos].t = temp; ssm--; } else if(matrix[row][col] == 2) { //temp = (node*) malloc (sizeof(node)); temp = oprate[tos].t; tos--; oprate[tos].t = temp; } else if(matrix[row][col] == 3) { printf("\nExpression is Invalid...\n"); printf("%c %c can not occur simultaneously\n",oprate[tos].op_name,exp[ssm]); break; } } } ssm++; } printf("show tree \n\n\n"); show_tree(oprate[tos].t); printf("Over"); getch(); getch(); }
/*
 * GLUT display callback: renders the reference axes, the input data set
 * and the SOM neurons, honouring the show[] visibility toggles, under
 * the rotation accumulated in axis_x/axis_y/axis_z.
 */
void cb_display(void)
{
    unsigned int i, j;
    clann_real_type *w;

    glPushMatrix();

    /* Apply the accumulated trackball rotation, one axis at a time. */
    glRotatef((GLfloat) axis_z, 0.0, 0.0, 1.0);
    glRotatef((GLfloat) axis_y, 0.0, 1.0, 0.0);
    glRotatef((GLfloat) axis_x, 1.0, 0.0, 0.0);

    glClear(GL_COLOR_BUFFER_BIT);

    if (show[AXIS])
    {
        /* Draw the X (red), Y (green) and Z (blue) half-axes anchored at
         * (-1,-1,-1); the translation is undone again afterwards. */
        glTranslatef(-1.0, -1.0, -1.0);
        glLineWidth(1.0);
        glColor3f(1.0, 0.0, 0.0);
        glBegin(GL_LINE_STRIP);
        glVertex3f(0.0, 0.0, 0.0);
        glVertex3f(0.5, 0.0, 0.0);
        glEnd();
        glColor3f(0.0, 1.0, 0.0);
        glBegin(GL_LINE_STRIP);
        glVertex3f(0.0, 0.0, 0.0);
        glVertex3f(0.0, 0.5, 0.0);
        glEnd();
        glColor3f(0.0, 0.0, 1.0);
        glBegin(GL_LINE_STRIP);
        glVertex3f(0.0, 0.0, 0.0);
        glVertex3f(0.0, 0.0, 0.5);
        glEnd();
        glTranslatef(1.0, 1.0, 1.0);
    }

    /**
     * Display input set
     */
    glColor3f(0.6, 0.6, 0.6);
    glPointSize(1.0);

    if (show[INPUT])
    {
        glBegin(GL_POINTS);
        for (i = 0; i < x.rows; i++)
        {
            glVertex3f((GLfloat) *matrix_value(&x, i, X),
                       (GLfloat) *matrix_value(&x, i, Y),
                       (GLfloat) *matrix_value(&x, i, Z));
        }
        glEnd();
    }

    /**
     * Display the connections of SOM
     * (disabled: relies on som_grid_get_weights(grid, i, j), an older
     *  two-index API — see the one-index wrapper elsewhere in the file)
     *
    if (show[MESH])
    {
        glColor3f(0.0, 0.0, 0.0);
        glPointSize(1.0);
        glLineWidth(2.0);

        for (i = 0; i < ann.grid.x_len; i++)
        {
            glBegin(GL_LINE_STRIP);
            for (j = 0; j < ann.grid.y_len; j++)
            {
                w = som_grid_get_weights(&ann.grid, i, j);
                glVertex3f((GLfloat) w[X], (GLfloat) w[Y], (GLfloat) w[Z]);
            }
            glEnd();
        }

        for (i = 0; i < ann.grid.y_len; i++)
        {
            glBegin(GL_LINE_STRIP);
            for (j = 0; j < ann.grid.x_len; j++)
            {
                w = som_grid_get_weights(&ann.grid, j, i);
                glVertex3f((GLfloat) w[X], (GLfloat) w[Y], (GLfloat) w[Z]);
            }
            glEnd();
        }
    }*/

    /**
     * Display the output neurons
     */
    if (show[POINTS])
    {
        glPointSize(3.0);
        glLineWidth(1.0);

        glBegin(GL_POINTS);
        for (j = 0; j < ann.grid.n_neurons; j++)
        {
            /* Each weight row holds the neuron's (X, Y, Z) position. */
            w = matrix_value(&ann.grid.weights, j, 0);
            glVertex3f((GLfloat) w[X], (GLfloat) w[Y], (GLfloat) w[Z]);
        }
        glEnd();
    }

    glPopMatrix();
    glutSwapBuffers();
}
/*
 * Competitive (on-line) k-means training: repeatedly presents the input
 * samples in shuffled order, drags each sample's winning center towards
 * it by `learning_rate', and stops once the mean center displacement per
 * pass drops to ann->noticeable_change_rate or below.
 *
 * Fixes: the shuffle index vector `mess' was allocated but never freed
 * (leak — the sibling som_train_incremental does free it), and the
 * malloc() result was used unchecked (CERT MEM32-C).
 */
void
kmeans_train(struct kmeans *ann,
             const struct matrix *x,
             clann_real_type learning_rate)
{
    clann_size_type s, i, *mess;
    clann_real_type e, distance, minimum, *sample, *winner;

    /*
     * Index vector used to shuffle the input presentation sequence
     */
    mess = malloc(sizeof(clann_size_type) * x->rows);
    if (mess == NULL)
    {
        printf("E. [KMEANS] Memory allocation failed.\n");
        return;
    }

    ann->old_centers = matrix_copy_new(&ann->centers);

    for (s = 0; s < x->rows; s++)
        mess[s] = s;

    do
    {
        /*
         * For each input in the shuffle list
         */
        clann_shuffle((clann_int_type *) mess, x->rows);

        e = 0;

        for (s = 0; s < x->rows; s++)
        {
            sample = matrix_value(x, mess[s], 0);

            /*
             * Find the center most closer to current input sample
             */
            minimum = metric_euclidean(sample,
                                       matrix_value(&ann->centers, 0, 0),
                                       ann->center_size);
            winner = matrix_value(&ann->centers, 0, 0);

            for (i = 1; i < ann->n_centers; i++)
            {
                distance = metric_euclidean(sample,
                                            matrix_value(&ann->centers, i, 0),
                                            ann->center_size);

                if (distance < minimum)
                {
                    minimum = distance;
                    winner = matrix_value(&ann->centers, i, 0);
                }
            }

            /*
             * Adjust winning center positions
             */
            for (i = 0; i < ann->center_size; i++)
                winner[i] += learning_rate * (sample[i] - winner[i]);
        }

        /*
         * Compute the mean of changes
         */
        for (i = 0; i < ann->n_centers; i++)
        {
            e += metric_euclidean(matrix_value(&ann->centers, i, 0),
                                  matrix_value(ann->old_centers, i, 0),
                                  ann->center_size);
        }

        e = e / ann->n_centers;

#if CLANN_VERBOSE
        printf("N. [KMEANS] Mean centers' update: " CLANN_PRINTF ".\n", e);
#endif

        matrix_copy(&ann->centers, ann->old_centers);
    }
    while (e > ann->noticeable_change_rate);

    free(mess); /* previously leaked */

    /* NOTE(review): freeing only the struct may leak storage that
     * matrix_copy_new allocates internally — confirm whether a
     * matrix_destroy-style call is required here. */
    free((void *) ann->old_centers);
    ann->old_centers = NULL;
}
/*
 * Returns a pointer to the first weight of neuron `index'; the whole
 * row of grid.weights is that neuron's weight vector.
 */
clann_real_type*
som_grid_get_weights(struct som *ann, unsigned int index)
{
    struct matrix *weights = &ann->grid.weights;

    return matrix_value(weights, index, 0);
}