// Construct a set of `count` basis functions over a `dimension`-dimensional
// input space. Centers a[j][i] are drawn uniformly in -a_range..a_range,
// widths b[j] are a random fraction of b_range (biased towards b_range),
// and weights / outputs / distances / inputs are zero-initialised.
CBasisFunctions::CBasisFunctions(u32 count, u32 dimension, float a_range, float b_range, float w_range, u32 function_type)
{
    this->functions_count = count;
    this->dimension       = dimension;
    this->a_range         = a_range;
    this->b_range         = b_range;
    this->w_range         = w_range;
    this->function_type   = function_type;

    #ifdef DEBUG_MODE
    printf(" basis function type %u\n", this->function_type);
    #endif

    // One center row and one width per basis function.
    a = (float**)malloc(this->functions_count*sizeof(float*));
    b = (float*)malloc(this->functions_count*sizeof(float));

    for (u32 func = 0; func < this->functions_count; func++)
    {
        a[func] = (float*)malloc(this->dimension*sizeof(float));

        // random center, in range -a_range..a_range
        for (u32 dim_idx = 0; dim_idx < this->dimension; dim_idx++)
            a[func][dim_idx] = this->a_range*rnd_();

        // Width floor differs per function type: Gauss keeps widths close
        // to b_range (k = 0.90), other types allow down to half (k = 0.50).
        float k = (function_type == BASIS_FUNCTION_TYPE_GAUSS) ? 0.90 : 0.50;
        b[func] = this->b_range*(k + (1.0 - k)*abs_(rnd_()));
    }

    // Zero-initialise per-function outputs.
    for (u32 func = 0; func < this->functions_count; func++)
        output.push_back(0.0);

    // Zero-initialise weights and per-function distances.
    w        = (float*)malloc(this->functions_count*sizeof(float));
    distance = (float*)malloc(this->functions_count*sizeof(float));

    for (u32 func = 0; func < this->functions_count; func++)
    {
        w[func]        = 0.0;
        distance[func] = 0.0;
    }

    // Zero-initialise the stored input vector.
    for (u32 dim_idx = 0; dim_idx < this->dimension; dim_idx++)
        input.push_back(0.0);

    linear_combination = 0.0;
}
// Newton iteration for the n-th root of a.
// Returns 0 for a == 0 and NaN for n < 1 or an even root of a negative
// number; otherwise iterates x += (a/x^(n-1) - x)/n until the step is
// negligible relative to x.
double root(double a, int n)
{
    if (a == 0.0)
        return 0;

    // Undefined cases: non-positive order, or even root of a negative value.
    if (n < 1 || (a < 0 && !(n & 1)))
        return 0./0.; /* NaN */

    double x = 1;
    double step;

    do
    {
        // One Newton step on f(x) = x^n - a.
        step = (a / pow_(x, n - 1) - x) / n;
        x += step;
    }
    while (abs_(step) >= abs_(x) * (DBL_EPSILON * 10));

    return x;
}
// C-API entry point: absolute value of a Python object.
// Delegates to the runtime's abs_() helper; exception translation to the
// C-API error convention is not implemented yet, so a raised Box* aborts.
extern "C" PyObject* PyNumber_Absolute(PyObject* o)
{
    try
    {
        return abs_(o);
    }
    catch (Box* exc)
    {
        Py_FatalError("unimplemented");
    }
}
// Scale the vector in place so its largest absolute element becomes 1.
// Fix: an all-zero vector previously divided every element by 0, producing
// inf/NaN; such a vector (and an empty one) is now left unchanged.
void vector_normalise(std::vector<float> *vector)
{
    u32 i;

    // Find the minimum and maximum element.
    float min_v = 1000000000000.0;
    float max_v = -min_v;

    for (i = 0; i < vector->size(); i++)
    {
        if ((*vector)[i] < min_v)
            min_v = (*vector)[i];
        if ((*vector)[i] > max_v)
            max_v = (*vector)[i];
    }

    // Peak magnitude over the whole vector.
    float tmp = max(abs_(min_v), abs_(max_v));

    // Guard: all-zero input would otherwise cause division by zero.
    if (tmp == 0.0)
        return;

    for (i = 0; i < vector->size(); i++)
        (*vector)[i]/= tmp;
}
// Draw an anti-aliased line (Xiaolin Wu style) from (x1,y1) to (x2,y2)
// using fixed-point interpolation along the major axis. Each column plots
// two vertically adjacent pixels whose coverage is split by the fractional
// part of the interpolated y. Columns with x < 0 are skipped.
// Fix: a zero-length line (start == end) previously reached the
// `* dy / dx` expression with dx == 0 — integer division by zero (UB).
// That degenerate case now plots the single point and returns.
void draw_line_antialias_(GBitmap* img, int16_t x1, int16_t y1, int16_t x2, int16_t y2, GColor8 color)
{
    uint8_t* img_pixels = gbitmap_get_data(img);
    int16_t w = gbitmap_get_bounds(img).size.w;
    int16_t h = gbitmap_get_bounds(img).size.h;

    fixed dx = int_to_fixed(abs_(x1 - x2));
    fixed dy = int_to_fixed(abs_(y1 - y2));

    // Swap axes for steep lines so the loop always walks the major axis,
    // then order the endpoints left-to-right.
    bool steep = dy > dx;
    if (steep) {
        swap_(x1, y1);
        swap_(x2, y2);
    }
    if (x1 > x2) {
        swap_(x1, x2);
        swap_(y1, y2);
    }

    dx = x2 - x1;
    dy = y2 - y1;

    // Degenerate case: single point (dx == 0 only happens when the
    // original endpoints coincide). Plot it exactly as the loop would
    // have for x == x1, avoiding the division by zero below.
    if (dx == 0) {
        fixed y_fixed = int_to_fixed(y1);
        if (x1 >= 0) {
            if (steep) {
                _plot(img_pixels, w, h, ipart_(y_fixed)    , x1, color, rfpart_(y_fixed));
                _plot(img_pixels, w, h, ipart_(y_fixed) + 1, x1, color, fpart_(y_fixed));
            } else {
                _plot(img_pixels, w, h, x1, ipart_(y_fixed)    , color, rfpart_(y_fixed));
                _plot(img_pixels, w, h, x1, ipart_(y_fixed) + 1, color, fpart_(y_fixed));
            }
        }
        return;
    }

    fixed intery;
    int x;
    for (x = x1; x <= x2; x++) {
        // Fixed-point y coordinate for this column.
        intery = int_to_fixed(y1) + (int_to_fixed(x - x1) * dy / dx);
        if (x >= 0) {
            if (steep) {
                // Axes were swapped: plot transposed.
                _plot(img_pixels, w, h, ipart_(intery)    , x, color, rfpart_(intery));
                _plot(img_pixels, w, h, ipart_(intery) + 1, x, color, fpart_(intery));
            } else {
                _plot(img_pixels, w, h, x, ipart_(intery)    , color, rfpart_(intery));
                _plot(img_pixels, w, h, x, ipart_(intery) + 1, color, fpart_(intery));
            }
        }
    }
}
void peak_hill_random(struct sPeakHill *func, u32 dim, u32 peak_count, u32 hill_count) { u32 i, j; std::vector<float> tmp; for (i = 0; i < dim; i++) tmp.push_back(0.0); for (j = 0; j < peak_count; j++) { for (i = 0; i < dim; i++) tmp[i] = rnd_(); func->p_alpha.push_back(tmp); func->p_r.push_back(-4.0*abs_(rnd_())); } for (j = 0; j < hill_count; j++) { for (i = 0; i < dim; i++) tmp[i] = rnd_(); func->h_alpha.push_back(tmp); func->h_beta.push_back(10.0 + 10.0*abs_(rnd_())); func->h_w.push_back(abs_(rnd_())); } }
void normalise_mat(std::vector<std::vector<float>> *mat) { /* float max_v = -1.0; float min_v = -1.0; u32 i, j; for (j = 0; j < mat->size(); j++) for (i = 0; i < (*mat)[j].size(); i++) { max_v = max((*mat)[j][i], max_v); min_v = min((*mat)[j][i], min_v); } if (max_v != min_v) for (j = 0; j < mat->size(); j++) for (i = 0; i < (*mat)[j].size(); i++) { // this->q[j][i]/= max_v; (*mat)[j][i] = map_to_interval(min_v, max_v, -1.0, 1.0, (*mat)[j][i]); } */ float max_v = 0.0; u32 i, j; for (j = 0; j < mat->size(); j++) for (i = 0; i < (*mat)[j].size(); i++) { max_v = max(abs_((*mat)[j][i]), max_v); } if (max_v != 0.0) for (j = 0; j < mat->size(); j++) for (i = 0; i < (*mat)[j].size(); i++) { (*mat)[j][i]/= max_v; } }
// Return the index of the stored action vector closest to `action`
// in L1 (Manhattan) distance. Returns 0 when `actions` is empty.
u32 CQFuncBFNNHybrid::get_action_idx(std::vector<float> action)
{
    u32 best_idx    = 0;
    float best_dist = 1000000000.0;

    for (u32 idx = 0; idx < actions.size(); idx++)
    {
        // Accumulate the componentwise absolute difference.
        float dist = 0.0;
        for (u32 comp = 0; comp < actions[idx].size(); comp++)
            dist += abs_(actions[idx][comp] - action[comp]);

        if (dist < best_dist)
        {
            best_dist = dist;
            best_idx  = idx;
        }
    }

    return best_idx;
}
void CServer::respawn(struct sRobot *robot) { std::vector<float> position; u32 i, j; for (i = 0; i < ROBOT_SPACE_DIMENSION; i++) position.push_back(0.0); float dist_min; do { /* for (i = 0; i < ROBOT_SPACE_DIMENSION; i++) position[i] = rnd_()*200.0; */ for (i = 0; i < ROBOT_SPACE_DIMENSION; i++) position[i] = rnd_()*position_max[i]*0.8; dist_min = 99999999999999999999.9; for (j = 0; j < robots.size(); j++) { float dist = 0.0; for (i = 0; i < ROBOT_SPACE_DIMENSION; i++) dist+= abs_(position[i] - robots[j].position[i]); if (dist < dist_min) dist_min = dist; } } while (dist_min < (colision_distance*2.0)); for (i = 0; i < ROBOT_SPACE_DIMENSION; i++) robot->position[i] = position[i]; }
/*
 * Convert an X window dump (xwd) read from `in` into printer output on
 * `out`, choosing orientation and an integer scale automatically when
 * they are not specified.
 *
 * NOTE(review): this block appears truncated — the function body continues
 * past the visible source (no closing brace for x2pmp here).
 *
 * scale            pixel replication factor; <= 0 means "choose automatically"
 * p_width/p_length page size in pels (units of PPI)
 * x_pos/y_pos      page margins in pels (units of PPI)
 * head/foot        optional header/footer strings (checked for non-NULL)
 * orient           page orientation; UNSPECIFIED selects by aspect ratio
 * invert           inversion flag (not used in the visible portion)
 */
void x2pmp(FILE *in, FILE *out, int scale,
    int p_width, int p_length, int x_pos, int y_pos, /* in pels (units of PPI) */
    char *head, char *foot, enum orientation orient, int invert)
{
    unsigned char *buffer, *win_name;
    unsigned int win_name_size, width, height, ncolors;
    unsigned int buffer_size, one_plane_size, byte_width, fixed_width;
    int no_of_bits;
    unsigned long swaptest = 1;   /* *(char*)&swaptest != 0 on little-endian hosts */
    XWDFileHeader header;

    /* Read header from file */
    if (fread((char *)&header, sizeof(header), 1, in) != 1) {
        if (feof(in))
            return;               /* empty input: nothing to print */
        else
            leave("fread");
    }

    /* Byte-swap the header on little-endian machines. */
    if (*(char *) &swaptest)
        _swaplong((char *) &header, sizeof(header));

    /* Version mismatch is only warned about, not fatal. */
    if (header.file_version != XWD_FILE_VERSION) {
        fprintf(stderr,"%s: file format version %d, not %d\n", progname,
            (int)header.file_version, XWD_FILE_VERSION);
    }

    /* Window name is stored between the fixed header and the pixmap data.
       NOTE(review): header.header_size and sizeof(header) are both
       unsigned, so this subtraction wraps instead of going negative and
       abs_() cannot repair it — confirm header_size >= sizeof(header)
       always holds for the dumps this is fed. */
    win_name_size = abs_(header.header_size - sizeof(header));
    if ((win_name = (unsigned char *)
            calloc(win_name_size, (unsigned) sizeof(char))) == NULL)
        leave("Can't calloc window name storage.");

    /* Read window name from file */
    if (fread((char *) win_name, sizeof(char), (int) win_name_size, in) !=
            win_name_size)
        leave("Unable to read window name from dump file.");
    DEBUG(>= 1)
        fprintf(stderr,"win_name =%s\n", win_name);

    width = header.pixmap_width;
    height = header.pixmap_height;
    /* Row width in bits; byte_width is the stride in bytes. */
    fixed_width = 8 * (byte_width = header.bytes_per_line);
    one_plane_size = byte_width * height;
    /* ZPixmap stores depth planes interleaved; XYPixmap handled per plane. */
    buffer_size = one_plane_size *
        ((header.pixmap_format == ZPixmap)? header.pixmap_depth: 1);

    /* Determine orientation and scale if not specified */
    if (orient == UNSPECIFIED)
        orient = (fixed_width <= height)? PORTRAIT: LANDSCAPE;

    if (scale <= 0) {
        int real_height = height;

        /* Header and footer each reserve two font heights of page space. */
        if (head)
            real_height += FONT_HEIGHT_PIXELS << 1;
        if (foot)
            real_height += FONT_HEIGHT_PIXELS << 1;

        /* Largest integer scale that fits the page, minus the margins. */
        switch(orient) {
        default:
        case PORTRAIT:
        case UPSIDE_DOWN:
            scale = min_((p_width - 2*x_pos) / fixed_width,
                (p_length - 2*y_pos) / real_height);
            break;
        case LANDSCAPE:
        case LANDSCAPE_LEFT:
            scale = min_((p_length - 2*y_pos) / fixed_width,
                (p_width - 2*x_pos) / real_height);
            break;
        }
        if (scale <= 0)
            leave("PixMap doesn't fit on page.");
        else DEBUG(>1)
            fprintf(stderr, "scaling by %d to yield %d x %d image\n",
                scale, fixed_width*scale, height*scale);
    }
// Cheap tanh-like squashing function: x / (1 + |x|).
// Monotonic, bounded to (-1, 1), and avoids transcendental calls.
float fast_tanh(float x)
{
    double denom = 1.0 + abs_(x);
    return x/denom;
}
/*
    One online learning step towards required_value.

    required_value  target for the current linear_combination output
    learning_rate   base step size for the weight update
    learn_basis     when true, also adapt basis centers a[][] and widths b[]

    Weights w[] follow an LMS-style update, clipped to -w_range..w_range.
    For GAUSS and KOHONEN basis types the center of the function with the
    smallest distance[] (presumably the one closest to the stored input —
    confirm against the distance computation elsewhere in the class) is
    pulled towards input[]. The two branches differ only in the width
    step (0.1 vs 0.01) and the upper width clip (b_range vs 100.0).
*/
void CBasisFunctions::learn_linear_combination(float required_value, float learning_rate, bool learn_basis)
{
    // Error of the last computed linear combination.
    float error = required_value - linear_combination;

    u32 i, j;

    // Gradient step on the output weights, clipped to -w_range..w_range.
    for (j = 0; j < this->functions_count; j++)
    {
        w[j]+= learning_rate*error*output[j];
        if (w[j] > w_range) w[j] = w_range;
        if (w[j] < -w_range) w[j] = -w_range;
    }

    if (learn_basis == false)
        return;

    if (function_type == BASIS_FUNCTION_TYPE_GAUSS)
    {
        // Index of the basis function with the smallest stored distance.
        u32 dist_min_idx = 0;
        for (j = 0; j < this->functions_count; j++)
            if (distance[j] < distance[dist_min_idx]) dist_min_idx = j;

        // Center learning rate, boosted for large target magnitudes.
        //float lc = learning_rate*(0.1 + 90.0*abs_(required_value));
        float lc = learning_rate*(0.1 + 90.0*abs_(required_value));

        // Move the winning center towards the current input.
        for (i = 0; i < this->dimension; i++)
            a[dist_min_idx][i] = (1.0 - lc)*a[dist_min_idx][i] + lc*input[i];

        // Nudge all widths by the error; keep them in 0..b_range.
        for (j = 0; j < this->functions_count; j++)
        {
            b[j]+= 0.1*error*w[j];
            if (b[j] < 0.0) b[j] = 0.0;
            if (b[j] > this->b_range) b[j] = this->b_range;
        }
    }

    if (function_type == BASIS_FUNCTION_TYPE_KOHONEN)
    {
        // Index of the basis function with the smallest stored distance.
        u32 dist_min_idx = 0;
        for (j = 0; j < this->functions_count; j++)
            if (distance[j] < distance[dist_min_idx]) dist_min_idx = j;

        // Center learning rate, boosted for large target magnitudes.
        float lc = learning_rate*(0.1 + 90.0*abs_(required_value));

        // Move the winning center towards the current input.
        for (i = 0; i < this->dimension; i++)
            a[dist_min_idx][i] = (1.0 - lc)*a[dist_min_idx][i] + lc*input[i];

        // Smaller width step here (0.01) with a fixed upper clip of 100.
        for (j = 0; j < this->functions_count; j++)
        {
            b[j]+= 0.01*error*w[j];
            if (b[j] < 0.0) b[j] = 0.0;
            if (b[j] > 100.0) b[j] = 100.0;
        }
    }
}
// Train a small feed-forward NN to reproduce nonlinear functions
// (sin/cos) of a simulated differential-drive robot's filtered encoder
// values, then log the final iterations for inspection.
int main()
{
    srand(time(NULL));

    // Robot model parameters (units per the model's convention).
    struct sRobotInit robot_init;
    robot_init.x = 0.0;
    robot_init.y = 0.0;
    robot_init.theta = 0.0;
    robot_init.wheel_diameter = 300.0;
    robot_init.wheel_distance = 800.0;
    robot_init.encoder_resolution = 1000;

    class CRobotModel *robot_model;
    robot_model = new CRobotModel(robot_init);

    // Logs: 10 channels of robot state, 11 channels of NN results.
    class CLog *robot_log;
    robot_log = new CLog((char*)"robot_log.txt", 10);

    class CLog *nn_log;
    nn_log = new CLog((char*)"nn_result/nn_log.txt", 11);

    struct sRobotOutput robot_output, robot_output_filtered; //, robot_output_filtered_prev;

    u32 k;
    float error_filtered = 0.0;

    // Network topology: 3 inputs, 10 hidden, 3 outputs; tanh neurons.
    struct sNNInitStruct nn_init;
    u32 init_vector[] = {3, 10, 3};
    for (k = 0; k < 3; k++)
        nn_init.init_vector.push_back(init_vector[k]);
    nn_init.weight_range = 4.0;
    nn_init.init_weight_range = 0.01*1.0/nn_init.weight_range;
    nn_init.learning_constant = 1.0/100.0;
    nn_init.output_limit = 4.0;
    nn_init.neuron_type = NN_LAYER_NEURON_TYPE_TANH;
    nn_init.order = 5;

    class CNN *nn;
    nn = new CNN(nn_init);

    /*
    struct sNeuralNetworkInitStructure nn_init;
    nn_init.init_vector_size = 3;
    nn_init.init_vector = init_vector;
    nn_init.weight_range = 4.0;
    nn_init.learning_constant = 0.0001;
    nn_init.weight_range_init = 0.1;
    nn_init.order = 5;
    nn_init.neuron_type = NEURON_TYPE_MIXED;
    //nn_init.neuron_type = NEURON_TYPE_COMMON;

    class CNeuralNetwork *nn;
    nn = new CNeuralNetwork(nn_init);
    */

    u32 iterations = 100000;

    //robot_output_filtered = robot_model->get_output_filtered();

    for (k = 0; k < iterations; k++)
    {
        // Drive the simulated robot with small random inputs.
        robot_model->random_input(0.03);
        robot_model->process();

        robot_output = robot_model->get_output();
        //robot_output_filtered_prev = robot_output_filtered;
        robot_output_filtered = robot_model->get_output_filtered();

        float k_tmp = 1000.0;   // display scale applied to theta in the logs

        // NN input: both filtered encoder values plus a constant bias term.
        std::vector<float> nn_input;
        nn_input.push_back(robot_output_filtered.left_encoder);
        nn_input.push_back(robot_output_filtered.right_encoder);
        nn_input.push_back(1.0);

        // Targets: nonlinear (sin/cos) functions of the encoder values.
        std::vector<float> nn_required_output;
        /*
        nn_required_output.push_back(robot_output_filtered.x/500.0);
        nn_required_output.push_back(robot_output_filtered.y/500.0);
        nn_required_output.push_back(robot_output_filtered.theta*k_tmp/2500.0);
        */
        nn_required_output.push_back(sin(robot_output_filtered.left_encoder));
        nn_required_output.push_back(cos(robot_output_filtered.right_encoder));
        nn_required_output.push_back(cos(robot_output_filtered.left_encoder + robot_output_filtered.right_encoder));

        // Placeholder outputs, overwritten by nn->get() below.
        std::vector<float> nn_output;
        nn_output.push_back(rnd_());
        nn_output.push_back(rnd_());
        nn_output.push_back(rnd_());

        nn->process(nn_input);
        nn_output = nn->get();

        // Freeze learning for the last 1000 iterations to evaluate the net.
        if ( k < (iterations - 1000) )
            nn->learn(nn_required_output);

        // L1 output error, plus a low-pass filtered version for the log.
        float error = 0.0;
        u32 i;
        for (i = 0; i < nn_required_output.size(); i++)
            error+= abs_(nn_required_output[i] - nn_output[i]);

        float fil = 0.98;
        error_filtered = error_filtered*fil + abs_(error)*(1.0 - fil);

        // Log only the final 500 iterations.
        if ( k > (iterations - 500) )
        {
            robot_log->add(0, robot_output.x);
            robot_log->add(1, robot_output.y);
            robot_log->add(2, robot_output.theta*k_tmp);
            robot_log->add(3, robot_output.left_encoder);
            robot_log->add(4, robot_output.right_encoder);
            robot_log->add(5, robot_output_filtered.x);
            robot_log->add(6, robot_output_filtered.y);
            robot_log->add(7, robot_output_filtered.theta*k_tmp);
            robot_log->add(8, robot_output_filtered.left_encoder);
            robot_log->add(9, robot_output_filtered.right_encoder);

            nn_log->add(0, nn_input[0]/1000.0);
            nn_log->add(1, nn_input[1]/1000.0);
            nn_log->add(2, nn_input[2]);
            nn_log->add(3, nn_required_output[0]);
            nn_log->add(4, nn_required_output[1]);
            nn_log->add(5, nn_required_output[2]);
            nn_log->add(6, nn_output[0]);
            nn_log->add(7, nn_output[1]);
            nn_log->add(8, nn_output[2]);
            nn_log->add(9, error);
            nn_log->add(10, error_filtered);
        }
    }

    robot_log->save();
    nn_log->save();
    nn->save((char*)"nn/nn");

    delete nn_log;
    delete robot_model;
    delete robot_log;
    delete nn;

    printf("program done\n");
    return 0;
}