Example #1
/* YYLEX -- Return the next token from the input stream.  Two separate lexical
 * analyzers are provided, the "command mode" lexical analyzer for interactive
 * command entry, and the "compute mode" analyzer for more sophisticated
 * applications.  The nesting level of parentheses and braces is used to switch
 * between the two modes.  When the paren level is nonzero compute mode is in
 * effect.  Mode switching may be defeated by setting the external variable
 * _lexmodes to zero.  A single parser accepts input from both lexical
 * analyzers.
 */
int 
yylex (void)
{
	register int	token;

	if (_lexmodes && parenlevel == 0 && bracelevel < PBRACE) {
	    while (!(token = lexicon()))
		if (yywrap())
		    break;
	} else
	    token = lex_yylex();

	if (!lexdebug)
	    return (token);

#if LEXDEBUG
	switch (token) {
	case Y_CONSTANT:
	    eprintf ("CONSTANT ");
	    fprop (stderr, reference (operand, yylval));
	    eprintf ("\n");
	    break;
	case Y_IDENT:
	    eprintf ("IDENT ");
	    fprop (stderr, reference (operand, yylval));
	    eprintf ("\n");
	    break;
	case Y_OSESC:
	    eprintf ("Y_OSESC ");
	    fprop (stderr, reference (operand, yylval));
	    eprintf ("\n");
	    break;
	case Y_APPEND:
	    eprintf ("Y_APPEND\n");
	    break;
	case Y_ALLAPPEND:
	    eprintf ("Y_ALLAPPEND\n");
	    break;
	case Y_ALLREDIR:
	    eprintf ("Y_ALLREDIR\n");
	    break;
	case Y_GSREDIR:
	    eprintf ("Y_GSREDIR\n");
	    break;
	case Y_ALLPIPE:
	    eprintf ("Y_ALLPIPE\n");
	    break;
	case Y_NEWLINE:
	    eprintf ("NEWLINE\n");
	    break;
	default:
	    eprintf ("`%c'\n", token);
	    break;
	}
#endif

	return (token);
}
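The mode switch above reduces to a single test: command mode applies only at zero nesting depth. A minimal standalone sketch of that dispatch, with hypothetical command_lex()/compute_lex() standing in for lexicon()/lex_yylex():

int command_lex (void) { return ('c'); }   /* hypothetical command-mode scanner */
int compute_lex (void) { return ('e'); }   /* hypothetical compute-mode scanner */

int paren_level = 0;    /* raised by '(' and '{', lowered by ')' and '}' */
int lex_modes   = 1;    /* 0 defeats switching, like _lexmodes = 0 */

int
next_token (void)
{
	/* Command mode only at top level; any nesting means compute mode. */
	if (lex_modes && paren_level == 0)
	    return (command_lex());
	return (compute_lex());
}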
Example #2
File: exec.c Project: geechee/iraf
/* PRINTCALL -- Print the calling sequence for a task.  Called by killtask()
 * to print stack trace.
 */
void
printcall (
  FILE	*fp,
  struct task *tp
)
{
	register struct param *pp;
	int	notfirst = 0;

	fprintf (fp, "    %s (", tp->t_ltp->lt_lname);
	for (pp = tp->t_pfp->pf_pp;  pp != NULL;  pp = pp->p_np)
	    if (pp->p_flags & P_CLSET) {
		if (notfirst)
		    fprintf (fp, ", ");
		notfirst++;
		if (!(tp->t_pfp->pf_flags & PF_FAKE) && !(pp->p_mode & M_FAKE))
		    fprintf (fp, "%s=", pp->p_name);

		/* Use only low level routines to print the parameter value to
		 * avoid error recursion.  In particular, parameter indirection
		 * is not resolved.
		 */
		if (!(pp->p_valo.o_type & OT_UNDEF))
		    fprop (fp, &pp->p_valo);
		else
		    fprintf (fp, "UNDEF");
	    }
	fprintf (fp, ")\n");
}
Example #3
  template <typename T1, typename T2, class Tstate1, class Tstate2>
  double euclidean_module<T1,T2,Tstate1,Tstate2>::
  infer2(Tstate1 &i1, Tstate2 &infered_label, 
	 infer_param &ip, Tstate2 *label, Tstate1 *energy) {
    infered_label.x.set(0);
    Tstate1 tmp;
    idx_bloop1(e, energies, T1) {
      fprop(i1, infered_label, tmp);
      idx_copy(tmp.x, e);
      infered_label.x.set(infered_label.x.get() + 1);
    }
    // ... remainder of infer2 not shown in this listing: it selects the
    // minimum-energy entry of `energies` as the inferred label ...
  }
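What the loop above is building toward: one fprop per candidate label fills energies, and the inferred label is the argmin over that vector. A reduced sketch, with a hypothetical energy_fn standing in for the module's fprop:

  #include <algorithm>
  #include <functional>
  #include <vector>

  // try every candidate label, record its energy, return the cheapest one
  int infer_label(const std::vector<double> &input, int n_labels,
                  const std::function<double(const std::vector<double>&, int)> &energy_fn) {
    std::vector<double> energies(n_labels);
    for (int l = 0; l < n_labels; ++l)
      energies[l] = energy_fn(input, l);    // one forward pass per label
    return (int)(std::min_element(energies.begin(), energies.end())
                 - energies.begin());
  }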
Example #4
void Simulator::outputProperty(){
	fstream fprop("property.txt",ios::out);
	fprop << "*** Energy Limit ***" << endl;
	fprop << " maxPG	=" << maxPG << endl;
	fprop << " maxAE	=" << maxAE << endl;
	fprop << " maxPF	=" << maxPF << endl;
	fprop << " maxAF	=" << maxAF << endl;
	fprop << " totAtom	=" << totAtom << endl;
	fprop << endl;
	fprop << "*** Time ***" << endl;
	fprop << " dt	=" << dt << endl;
	fprop << " maxstep	=" << maxstep << endl;
	fprop << " eps	=" << eps << endl;
	fprop << " div	=" << div << endl;
	fprop << " debug	=" << debug << endl;
	fprop << endl;
	fprop << "*** Input ***" << endl;
	fprop << " pump	=" << pump << endl;
	fprop << " detune	=" << detune << endl;
	fprop << " width	=" << width << endl;
	fprop << endl;
	fprop << "*** Interaction ***" << endl;
	fprop << " cohg	=" << cohg << endl;
	fprop << " cohf	=" << cohf << endl;
	fprop << endl;
	fprop << "*** Loss ***" << endl;
	fprop << " lossPG	=" << lossPG << endl;
	fprop << " lossPF	=" << lossPF << endl;
	fprop << " life	=" << life << endl;
	if(restarg.size()>0){
		fprop << "!!! unknown arg !!!" << endl;
		auto ite = restarg.begin();
		while(ite!=restarg.end()){
			fprop << " " << (*ite).first << "	=" << (*ite).second << endl;
			ite++;
		}
	}
	fprop.close();
}
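The file written above is a flat name<TAB>=value listing with *** ... *** section headers; a hypothetical reader that inverts it (assuming exactly the format outputProperty emits):

	#include <fstream>
	#include <map>
	#include <string>

	// collect every "name\t=value" line of property.txt into a map,
	// skipping section headers and blank lines (no '=' present)
	std::map<std::string, std::string> readProperty(const std::string &path){
		std::map<std::string, std::string> props;
		std::ifstream in(path);
		std::string line;
		while(std::getline(in, line)){
			std::string::size_type eq = line.find('=');
			if(eq == std::string::npos) continue;
			std::string key = line.substr(0, eq);
			key.erase(0, key.find_first_not_of(" \t"));
			key.erase(key.find_last_not_of(" \t") + 1);
			props[key] = line.substr(eq + 1);
		}
		return props;
	}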
Example #5
/* GQUERY -- Determine if the value of a parameter given by the user is OK.
 * Also, store the new value in the parameter; in the case of a list
 * structured parameter, the new value is the name of a new list file.
 * This routine is called by EPARAM to verify that the new parameter value
 * is in range, and to set the new value if so.
 */
char *
gquery (
  struct param *pp,
  char	*string
)
{
	register char *ip;
	char	buf[SZ_LINE];
	char	*query_status, *nlp, *errmsg;
	int	arrflag, offset, bastype, batch;
	struct	operand o;
	char	*strcpy(), *index();

	bastype = pp->p_type & OT_BASIC;
	batch   = firstask->t_flags & T_BATCH;
	arrflag = pp->p_type & PT_ARRAY;

	if (arrflag)
	    offset = getoffset(pp);

	if (batch) {
	    errmsg = e1;
	    return (errmsg);
	} else
	    query_status = strcpy (buf, string);

	ip = buf;

	/* Set o to the current value of the parameter.  Beware that some
	 * of the logical branches which follow assume that struct o has
	 * been initialized to the current value of the parameter.
	 */
	if (pp->p_type & PT_LIST) {
	    setopundef (&o);
	} else if (arrflag) {
	    poffset (offset);
	    paramget (pp, FN_VALUE);
	    o = popop ();
	} else
	    o = pp->p_valo;

	/* Handle eof, a null-length line (lone carriage return),
	 * and line with more than SZ_LINE chars.  Ignore leading whitespace
	 * if basic type is not string.
	 */
	if (query_status == NULL)
	    goto testval;

	/* Ignore leading whitespace if it is not significant for this
	 * datatype.  Do this before testing for empty line, so that a
	 * return such as " \n" is equivalent to "\n".  I.e., do not
	 * penalize the user if they type the space bar by accident before
	 * typing return to accept the default value.
	 */
	if (bastype != OT_STRING || (pp->p_type & PT_LIST))
	    while (*ip == ' ' || *ip == '\t')
		ip++;

	if (*ip == '\n') {
	    /* Blank lines usually just accept the current value
	     * but if the param is a string and is undefined,
	     * it sets the string to a (defined) nullstring.
	     */
	    if (bastype == OT_STRING && opundef (&o)) {
		*ip = '\0';
		o = makeop (ip, bastype);
	    } else
		goto testval;
	}

	/* Cancel the newline. */
	if ((nlp = index (ip, '\n')) != NULL)
	    *nlp = '\0';

	/* Finally, we have handled the pathological cases.
	 */
	if (pp->p_type & PT_LIST)
	    o = makeop (string, OT_STRING);
	else
	    o = makeop (ip, bastype);

testval:   
	if (*string == '@')
	    errmsg = "OK";
	else if (pp->p_type & PT_LIST)
	    errmsg = "OK";
	else if (inrange (pp, &o))
	    errmsg = "OK";
	else {
	    errmsg = e2;
	    return (errmsg);
	}

	if (cldebug) {
	    eprintf ("changing `%s.p_val' to ", pp->p_name);
	    fprop (stderr, &o);
	    eprintf ("\n");
	}

	/* Update param with new value.
	 */
	pushop (&o);
	if (arrflag)
	    poffset (offset);

	paramset (pp, FN_VALUE);
	pp->p_flags |= P_SET;

	return ("OK");
}
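Stripped of the parameter plumbing, gquery's accept/validate logic for a numeric parameter comes down to a few cases; a sketch with a hypothetical [lo, hi] range standing in for inrange():

	#include <cstdlib>
	#include <optional>
	#include <string>

	// blank input keeps the current value; anything else must parse and
	// land inside [lo, hi], or the reply is rejected (caller re-prompts)
	std::optional<double> apply_reply (std::string reply, double current,
	    double lo, double hi)
	{
		reply.erase (0, reply.find_first_not_of (" \t"));
		if (reply.empty() || reply == "\n")
		    return current;
		char *end = NULL;
		double v = std::strtod (reply.c_str(), &end);
		if (end == reply.c_str() || v < lo || v > hi)
		    return std::nullopt;
		return v;
	}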
Example #6
	ReflectionComponent CollisionBody::Reflection(CollisionBody* val) {
		ReflectionComponent refcomp;
		Property fprop(Property::FLOAT);
		(refcomp.properties["Mass"] = fprop).Set<float>(val->mass);
		refcomp.properties["Mass"].update_func = [val] (Property& prop) { val->mass = prop.Get<float>(); };
		static std::vector<std::string> choices = {"BOX", "SPHERE", "CAPSULE"};
		std::string current_shape;
		switch (val->collision_shape) {
			case SPHERE:
				current_shape = "SPHERE";
				(refcomp.properties["radius"] = fprop).Set<float>(val->radius);
				refcomp.properties["radius"].update_func = [val] (Property& prop) {
					val->radius = prop.Get<float>();
					static_cast<btSphereShape*>(val->shape.get())->setUnscaledRadius(val->radius);
				};
				break;
			case BOX:
				current_shape = "BOX";
				(refcomp.properties["extent_x"] = fprop).Set<float>(val->half_extents.x());
				refcomp.properties["extent_x"].update_func = [val] (Property& prop) {
					val->half_extents.setX(prop.Get<float>());
					static_cast<btBoxShape*>(val->shape.get())->setImplicitShapeDimensions(val->half_extents);
				};
				(refcomp.properties["extent_y"] = fprop).Set<float>(val->half_extents.y());
				refcomp.properties["extent_y"].update_func = [val] (Property& prop) {
					val->half_extents.setY(prop.Get<float>());
					static_cast<btBoxShape*>(val->shape.get())->setImplicitShapeDimensions(val->half_extents);
				};
				(refcomp.properties["extent_z"] = fprop).Set<float>(val->half_extents.z());
				refcomp.properties["extent_z"].update_func = [val] (Property& prop) {
					val->half_extents.setZ(prop.Get<float>());
					static_cast<btBoxShape*>(val->shape.get())->setImplicitShapeDimensions(val->half_extents);
				};
				break;
			case CAPSULE:
				current_shape = "CAPSULE";
				(refcomp.properties["radius"] = fprop).Set<float>(val->radius);
				refcomp.properties["radius"].update_func = [val] (Property& prop) {
					val->radius = prop.Get<float>();
					static_cast<btCapsuleShape*>(val->shape.get())->setImplicitShapeDimensions(
						btVector3(val->radius, 0.5f * val->height, val->radius));
				};
				(refcomp.properties["height"] = fprop).Set<float>(val->height);
				refcomp.properties["height"].update_func = [val] (Property& prop) {
					val->height = prop.Get<float>();
					static_cast<btCapsuleShape*>(val->shape.get())->setImplicitShapeDimensions(
						btVector3(val->radius, 0.5f * val->height, val->radius));
				};
				break;
		}
		radio_t shape_choices = std::make_pair(std::ref(choices), current_shape);
		Property rprop(Property::RADIO);
		(refcomp.properties["Shape"] = rprop).Set<radio_t>(shape_choices);
		refcomp.properties["Shape"].update_func = [val] (Property& prop) { 
			radio_t shape_choices = prop.Get<radio_t>();
			if (shape_choices.second == "BOX") {
				val->new_collision_shape = BOX;
			}
			else if (shape_choices.second == "SPHERE") {
				val->new_collision_shape = SPHERE;
			}
			else if (shape_choices.second == "CAPSULE") {
				val->new_collision_shape = CAPSULE;
			}
		};
		Property prop(Property::BOOLEAN);
		(refcomp.properties["Disable Deactivation"] = prop).Set<bool>(val->disable_deactivation);
		refcomp.properties["Disable Deactivation"].update_func = [val] (Property& prop) { val->disable_deactivation = prop.Get<bool>(); };
		(refcomp.properties["Disable Rotation"] = prop).Set<bool>(val->disable_rotation);
		refcomp.properties["Disable Rotation"].update_func = [val] (Property& prop) { val->disable_rotation = prop.Get<bool>(); };
		return refcomp;  // rely on NRVO; std::move here would inhibit copy elision
	}
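A sketch of how an editor might consume the component built above: walk the properties map and push each edited value back through its update_func. The Property/ReflectionComponent API here is assumed from this snippet alone:

	// hypothetical editor pass: after the UI mutates a Property, its
	// update_func writes the new value back into the CollisionBody
	void CommitEdits(ReflectionComponent& refcomp) {
		for (auto& kv : refcomp.properties) {
			Property& prop = kv.second;
			// ... UI widget keyed by kv.first edits prop here ...
			if (prop.update_func)
				prop.update_func(prop);  // e.g. val->mass = prop.Get<float>()
		}
	}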
Example #7
int main(int argc, char *argv[]){
  
	Params params;
  
	std::map<std::string, std::string> args;
	readArgs(argc, argv, args);
	if(args.find("algo")!=args.end()){
		params.algo = args["algo"];
	}else{
		params.algo = "qdMCNat";
	}

	if(args.find("inst_file")!=args.end())
		setParamsFromFile(args["inst_file"], args, params);
	else   
		setParams(params.algo, args, params);
  
	createLogDir(params.dir_path);
  
	gen.seed(params.seed);

	// Load the dataset
	MyMatrix X_train, X_valid;
	VectorXd Y_train, Y_valid;
	loadMnist(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
	//loadCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
	//loadLightCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
  
	// ConvNet parameters
	std::vector<ConvLayerParams> conv_params;
	ConvLayerParams conv_params1;
	conv_params1.Hf = 5;
	conv_params1.stride = 1;
	conv_params1.n_filter = 20;
	conv_params1.padding = 0;
	conv_params.push_back(conv_params1);
  
	ConvLayerParams conv_params2;
	conv_params2.Hf = 5;
	conv_params2.stride = 1;
	conv_params2.n_filter = 50;
	conv_params2.padding = 0;
	conv_params.push_back(conv_params2);

	std::vector<PoolLayerParams> pool_params;
	PoolLayerParams pool_params1;
	pool_params1.Hf = 2;
	pool_params1.stride = 2;
	pool_params.push_back(pool_params1);

	PoolLayerParams pool_params2;
	pool_params2.Hf = 2;
	pool_params2.stride = 2;
	pool_params.push_back(pool_params2);
  
	const unsigned n_conv_layer = conv_params.size();
  
	for(unsigned l = 0; l < conv_params.size(); l++){

		if(l==0){
			conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * params.img_depth;
			conv_params[l].N = (params.img_width - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
		}
		else{
			conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * conv_params[l-1].n_filter;
			conv_params[l].N = (pool_params[l-1].N - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
		}
		pool_params[l].N = (conv_params[l].N - pool_params[l].Hf)/pool_params[l].stride + 1;
	}
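	// Worked sizes for the two conv/pool stages above (assuming the usual
	// 28x28, depth-1 MNIST input from loadMnist):
	//   conv1: N = (28 - 5 + 0)/1 + 1 = 24;  pool1: (24 - 2)/2 + 1 = 12
	//   conv2: N = (12 - 5 + 0)/1 + 1 = 8;   pool2: (8 - 2)/2 + 1 = 4
	// so the flattened input prepended to nn_arch below is 50 * 4 * 4 = 800.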
  
	// Neural Network parameters
	const unsigned n_training = X_train.rows();
	const unsigned n_valid = X_valid.rows();
	const unsigned n_feature = X_train.cols();
	const unsigned n_label = Y_train.maxCoeff() + 1;
  
	params.nn_arch.insert(params.nn_arch.begin(),conv_params[n_conv_layer-1].n_filter * pool_params[n_conv_layer-1].N * pool_params[n_conv_layer-1].N);
	params.nn_arch.push_back(n_label);
	const unsigned n_layers = params.nn_arch.size();
  
	// Optimization parameters
	const int n_train_batch = ceil(n_training/(float)params.train_minibatch_size);
	const int n_valid_batch = ceil(n_valid/(float)params.valid_minibatch_size);
	double prev_loss = std::numeric_limits<double>::max();
	double eta = params.eta;

	// Create the convolutional layer
	std::vector<MyMatrix> conv_W(n_conv_layer);
	std::vector<MyMatrix> conv_W_T(n_conv_layer);
	std::vector<MyVector> conv_B(n_conv_layer);
  
	// Create the neural network
	MyMatrix W_out(params.nn_arch[n_layers-2],n_label);
	std::vector<MySpMatrix> W(n_layers-2);
	std::vector<MySpMatrix> Wt(n_layers-2);
	std::vector<MyVector> B(n_layers-1);

	double init_sigma = 0.;
	ActivationFunction act_func;
	ActivationFunction eval_act_func;
	if(params.act_func_name=="sigmoid"){
		init_sigma = 4.0;
		act_func = std::bind(logistic,true,_1,_2,_3);
		eval_act_func = std::bind(logistic,false,_1,_2,_3);
	}else if(params.act_func_name=="tanh"){
		init_sigma = 1.0;
		act_func = std::bind(my_tanh,true,_1,_2,_3);
		eval_act_func = std::bind(my_tanh,false,_1,_2,_3);
	}else if(params.act_func_name=="relu"){
		init_sigma = 1.0; // TODO: Find the good value
		act_func = std::bind(relu,true,_1,_2,_3);
		eval_act_func = std::bind(relu,false,_1,_2,_3);
	}else{
		std::cout << "Not implemented yet!" << std::endl;
		assert(false);
	}

	std::cout << "Initializing the network... ";
	params.n_params = initNetwork(params.nn_arch, params.act_func_name, params.sparsity, conv_params, pool_params, W_out, W, Wt, B, conv_W, conv_W_T, conv_B); // TODO: Init the conv bias

	// Deep copy of parameters for the adaptive rule
	std::vector<MyMatrix> mu_dW(n_layers-1);
	std::vector<MyVector> mu_dB(n_layers-1);

	MyMatrix pW_out = W_out;
	std::vector<MySpMatrix> pW = W;
	std::vector<MySpMatrix> pWt = Wt;
	std::vector<MyVector> pB = B;

	MyMatrix ppMii_out, ppM0i_out;
	MyVector ppM00_out;
  
	std::vector<MySpMatrix> ppMii,ppM0i;
	std::vector<MyVector> ppM00;

	MyMatrix pMii_out,pM0i_out;
	MyVector pM00_out;
  
	std::vector<MySpMatrix> pMii,pM0i;
	std::vector<MyVector> pM00;

	std::vector<MyMatrix> conv_ppMii, conv_ppM0i;
	std::vector<MyVector> conv_ppM00;

	std::vector<MyMatrix> conv_pMii, conv_pM0i;
	std::vector<MyVector> conv_pM00;
  
	// Convert the labels to one-hot vector
	MyMatrix one_hot = MyMatrix::Zero(n_training, n_label);
	labels2oneHot(Y_train,one_hot);
  
	// Configure the logger 
	std::ostream* logger;
	if(args.find("verbose")!=args.end()){
		getOutput("",logger);
	}else{
		getOutput(params.file_path,logger);
	}

	double cumul_time = 0.;
  
	printDesc(params, logger);
	printConvDesc(params, conv_params, pool_params, logger);
	std::cout << "Starting the learning phase... " << std::endl;
	*logger << "Epoch Time(s) train_loss train_accuracy valid_loss valid_accuracy eta" << std::endl;
  
	for(unsigned i = 0; i < params.n_epoch; i++){
		for(unsigned j = 0; j < n_train_batch; j++){
      
			// Mini-batch creation
			unsigned curr_batch_size = 0;
			MyMatrix X_batch, one_hot_batch;
			getMiniBatch(j, params.train_minibatch_size, X_train, one_hot, params, conv_params[0], curr_batch_size, X_batch, one_hot_batch);
      
			double prev_time = gettime();

			// Forward propagation for conv layer
			std::vector<std::vector<unsigned>> poolIdxX1(n_conv_layer);
			std::vector<std::vector<unsigned>> poolIdxY1(n_conv_layer);
      
			MyMatrix z0;
			std::vector<MyMatrix> conv_A(conv_W.size());
			std::vector<MyMatrix> conv_Ap(conv_W.size());
			convFprop(curr_batch_size, conv_params, pool_params, act_func, conv_W, conv_B, X_batch, conv_A, conv_Ap, z0, poolIdxX1, poolIdxY1);
            
			// Forward propagation
			std::vector<MyMatrix> Z(n_layers-1);
			std::vector<MyMatrix> A(n_layers-2);
			std::vector<MyMatrix> Ap(n_layers-2);
			fprop(params.dropout_flag, act_func, W, W_out, B, z0, Z, A, Ap);
      
			// Compute the output and the error
			MyMatrix out;
			softmax(Z[n_layers-2], out);
      
			std::vector<MyMatrix> gradB(n_layers-1);
			gradB[n_layers-2] = out - one_hot_batch;

			// Backpropagation
			bprop(Wt, W_out, Ap, gradB);

			// Backpropagation for conv layer
			std::vector<MyMatrix> conv_gradB(conv_W.size());
			MyMatrix layer_gradB = (gradB[0] * W[0].transpose());
			MyMatrix pool_gradB;
			layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, layer_gradB, pool_gradB);
      
			convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, pool_gradB, conv_gradB, poolIdxX1, poolIdxY1);
      
			if(params.algo == "bprop"){
				update(eta, gradB, A, z0, params.regularizer, params.lambda, W_out, W, Wt, B);
				convUpdate(curr_batch_size, eta, conv_params, conv_gradB, conv_A, X_batch, "", 0., conv_W, conv_W_T, conv_B);
	
			}else{

				// Compute the metric
				std::vector<MyMatrix> metric_gradB(n_layers-1);
				std::vector<MyMatrix> metric_conv_gradB(conv_params.size());

				if(params.algo=="qdMCNat"){

					// Monte-Carlo Approximation of the metric
					std::vector<MyMatrix> mc_gradB(n_layers-1);
					computeMcError(out, mc_gradB[n_layers-2]);

					// Backpropagation
					bprop(Wt, W_out, Ap, mc_gradB);

					for(unsigned k = 0; k < gradB.size(); k++){
						metric_gradB[k] = mc_gradB[k].array().square();
					}

					// Backpropagation for conv layer
					std::vector<MyMatrix> mc_conv_gradB(conv_W.size());
					MyMatrix mc_layer_gradB = (mc_gradB[0] * W[0].transpose());
					MyMatrix mc_pool_gradB;
					layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, mc_layer_gradB, mc_pool_gradB);
	  
					convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, mc_pool_gradB, mc_conv_gradB, poolIdxX1, poolIdxY1);
	  
					for(unsigned k = 0; k < conv_params.size(); k++){
						metric_conv_gradB[k] = mc_conv_gradB[k].array().square();
					}
				}	
				else if(params.algo=="qdop"){

					for(unsigned k = 0; k < conv_params.size(); k++){
						metric_conv_gradB[k] = conv_gradB[k].array().square();
					}
					for(unsigned k = 0; k < gradB.size(); k++){
						metric_gradB[k] = gradB[k].array().square();
					}
				}
				else if(params.algo=="qdNat"){
	  
					for(unsigned k = 0; k < conv_params.size(); k++){
						metric_conv_gradB[k] = conv_gradB[k].array().square();
					}

					for(unsigned k = 0; k < metric_gradB.size(); k++){
						metric_gradB[k] = MyMatrix::Zero(gradB[k].rows(),gradB[k].cols());
					}

					for(unsigned l = 0; l < n_label; l++){
						MyMatrix fisher_ohbatch = MyMatrix::Zero(curr_batch_size, n_label);
						fisher_ohbatch.col(l).setOnes();

						std::vector<MyMatrix> fgradB(n_layers-1);
						fgradB[n_layers-2] = out - fisher_ohbatch;
						bprop(Wt, W_out, Ap, fgradB);

						// Backpropagation for conv layer
						std::vector<MyMatrix> fisher_conv_gradB(conv_W.size());
						MyMatrix fisher_layer_gradB = (fgradB[0] * W[0].transpose());
						MyMatrix fisher_pool_gradB;
						layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, fisher_layer_gradB, fisher_pool_gradB);
	    
						convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, fisher_pool_gradB, fisher_conv_gradB, poolIdxX1, poolIdxY1);

						for(unsigned k = 0; k < conv_params.size(); k++){
							MyMatrix fisher_conv_gradB_sq = fisher_conv_gradB[k].array().square();
							for(unsigned m = 0; m < out.rows(); m++){
								for(unsigned f = 0; f < conv_params[k].n_filter; f++){
									for(unsigned n = 0; n < conv_params[k].N * conv_params[k].N; n++){
										fisher_conv_gradB_sq(f,m*conv_params[k].N*conv_params[k].N+n) *= out(m,l);
									}
								}
							}
							metric_conv_gradB[k] += fisher_conv_gradB_sq;
						}
	    
						for(unsigned k = 0; k < W.size(); k++){
							const unsigned rev_k = n_layers - k - 2;
							metric_gradB[rev_k] += (fgradB[rev_k].array().square().array().colwise() * out.array().col(l)).matrix();
						}
					}
				}
	
				bool init_flag = false;
				if(i == 0 && j == 0 && !params.init_metric_id){
					init_flag = true;
				}

				std::vector<MyMatrix> conv_Mii(conv_params.size());
				std::vector<MyMatrix> conv_M0i(conv_params.size());
				std::vector<MyVector> conv_M00(conv_params.size());
	
				buildConvQDMetric(curr_batch_size, metric_conv_gradB, conv_A, X_batch, conv_W, params.matrix_reg, conv_Mii, conv_M0i, conv_M00);

				updateConvMetric(init_flag, params.metric_gamma, conv_pMii, conv_pM0i, conv_pM00, conv_Mii, conv_M0i, conv_M00);

				MyMatrix Mii_out, M0i_out;
				MyVector M00_out;
				std::vector<MySpMatrix> Mii(W.size());
				std::vector<MySpMatrix> M0i(W.size());
				std::vector<MyVector> M00(W.size());

				buildQDMetric(metric_gradB, A, z0, W_out, W, params.matrix_reg, Mii_out, M0i_out, M00_out, Mii, M0i, M00);

				updateMetric(init_flag, params.metric_gamma, Mii_out, M0i_out, M00_out, Mii, M0i, M00, pMii_out, pM0i_out, pM00_out, pMii, pM0i, pM00);
				update(eta, gradB, A, z0, params.regularizer, params.lambda, W_out, W, Wt, B, Mii_out, M0i_out, M00_out, Mii, M0i, M00);
			}
      
			double curr_time = gettime();
			cumul_time += curr_time - prev_time;      
      
			if(params.minilog_flag){
	
				double train_loss = 0.;
				double train_accuracy = 0.;
				double valid_loss = 0.;
				double valid_accuracy = 0.;
				evalModel(eval_act_func, params, n_train_batch, n_training, X_train, Y_train, conv_params, pool_params, conv_W, conv_B, W_out, W, B, train_loss, train_accuracy);
				evalModel(eval_act_func, params, n_valid_batch, n_valid, X_valid, Y_valid, conv_params, pool_params, conv_W, conv_B, W_out, W, B, valid_loss, valid_accuracy);
	
				// Logging
				*logger << i + float(j)/n_train_batch << " " << cumul_time << " " << train_loss <<  " " << train_accuracy << " " << valid_loss <<  " " << valid_accuracy << " " << eta << std::endl;
	
			}
		}
		if(!params.minilog_flag || params.adaptive_flag){
			double train_loss = 0.;
			double train_accuracy = 0.;
			double valid_loss = 0.;
			double valid_accuracy = 0.;
			evalModel(eval_act_func, params, n_train_batch, n_training, X_train, Y_train, conv_params, pool_params, conv_W, conv_B, W_out, W, B, train_loss, train_accuracy);
			evalModel(eval_act_func, params, n_valid_batch, n_valid, X_valid, Y_valid, conv_params, pool_params, conv_W, conv_B, W_out, W, B, valid_loss, valid_accuracy);
      
			// if(params.adaptive_flag)
			// 	adaptiveRule(train_loss, prev_loss, eta, W, B, pMii, pM0i, pM00, pW, pB, ppMii, ppM0i, ppM00);
      
			// Logging
			if(!params.minilog_flag){
				*logger << i  << " " << cumul_time << " " << train_loss <<  " " << train_accuracy << " " << valid_loss <<  " " << valid_accuracy << " " << eta << std::endl;
			}
		}
	}
}
Example #8
File: modes.c Project: geechee/iraf
/* QUERY -- Query the user for the value of a parameter.  Prompt with the
 *  current value if any.  Keep this up until we can push a reasonable value.
 *  Also, store the new value in the parameter (except for list params, where,
 *  since the values are not kept, all that may change is P_LEOF if seen).
 * Give prompt, or name if none, current value and range if int, real or 
 *   filename.  Accept CR to leave value unchanged, else take the string
 *   entered to be the new value.  Repeat until parameter value is in range.
 * We mean to talk straight to the user here; thus, interact with the real
 *   stdio, not the effective t_stdio, so that redirections do not get in
 *   the way.  In batch mode, a forced query is handled by writing a
 *   message on the terminal of the parent cl (the original stderr), and
 *   leaving some info describing the query in a file in uparm (if there is
 *   no uparm, we abort).  We then loop, waiting for the user to run "service"
 *   in the interactive cl to service the query, leaving the answer in
 *   another file which we read and then delete.  If we wait a long time and
 *   get no response, we timeout.
 */
void 
query (struct param *pp)
{
	static	char *oormsg =
		"ERROR: Parameter value is out of range; try again";
	register char *ip;
	char	buf[SZ_PROMPTBUF+1];
	struct	operand o;
	int	bastype, batch, arrflag, offset=0, n_ele, max_ele, fd;
	char	*index(), *nlp, *nextstr();
	char	*bkg_query(), *query_status;
	char	*abuf;

	bastype = pp->p_type & OT_BASIC;
	batch = firstask->t_flags & T_BATCH;
	arrflag = pp->p_type & PT_ARRAY;

	if (arrflag) {			/* We may access the array many     */
	    offset = getoffset (pp);	/* times, so save the offset and    */
					/* push it when necessary.	    */
	    poffset (offset);
	    max_ele = size_array (pp) - offset;
	} else
	    max_ele = 1;


	forever {
	    if (batch) {
		/* Query from a background job.
		 */
		query_status = bkg_query (buf, SZ_PROMPTBUF, pp);

	    } else if (pp->p_type & (PT_GCUR|PT_IMCUR)) {
		/* Read a graphics cursor.
		 */
		char	source[33];
		int	cursor;

		/* Determine the source of graphics cursor input, chosen from
		 * either the graphics or image cursor or the terminal.
		 */
		if (pp->p_type & PT_GCUR) {
		    if (c_envfind ("stdgcur", source, 32) <= 0)
			strcpy (source, "stdgraph");
		} else {
		    if (c_envfind ("stdimcur", source, 32) <= 0)
			strcpy (source, "stdimage");
		}

		if (strcmp (source, "stdgraph") == 0)
		    cursor = STDGRAPH;
		else if (strcmp (source, "stdimage") == 0)
		    cursor = STDIMAGE;
		else
		    goto text_query;		/* get value from terminal */

		/* Read a physical graphics cursor.
		 */
		pp->p_flags &= ~P_LEOF;
		if (cursor == STDIMAGE) {
		    /* The following is a kludge used to temporarily implement
		     * the logical image cursor read.  In the future this will
		     * be eliminated, and the c_rcursor call below (cursor
		     * mode) will be used for stdimage as well as for stdgraph.
		     * The present code (IMDRCUR) goes directly to the display
		     * server to get the cursor value, bypassing cursor mode
		     * and the (currently nonexistent) stdimage kernel.
		     */
		    char    str[SZ_LINE+1], keystr[10];
		    int     wcs, key;
		    float   x, y;

		    if (c_imdrcur ("stdimage",
			&x,&y,&wcs,&key,str,SZ_LINE, 1, 1) == EOF) {
			query_status = NULL;

		    } else {
			if (isprint(key) && !isspace(key))
			    sprintf (keystr, "%c", key);
			else
			    sprintf (keystr, "\\%03o", key);
			sprintf (buf, "%.3f %.3f %d %s %s\n",
			    x, y, wcs, keystr, str);
		        query_status = (char *) ((XINT) strlen(buf));
		    }

		} else if (c_rcursor (cursor, buf, SZ_PROMPTBUF) == EOF) {
		    query_status = NULL;
		} else
		    query_status = (char *) ((XINT) strlen(buf));

	    } else if (pp->p_type & PT_UKEY) {
		/* Read a user keystroke command from the terminal.
		 */
		pp->p_flags &= ~P_LEOF;
		if (c_rdukey (buf, SZ_PROMPTBUF) == EOF)
		    query_status = NULL;
		else
		    query_status = (char *) ((XINT) strlen(buf));

	    } else {
text_query:	fd = spf_open (buf, SZ_PROMPTBUF);
		pquery (pp, fdopen(fd,"a"));
		spf_close (fd);

		c_stgputline ((XINT)STDOUT, buf);
		if (c_stggetline ((XINT)STDIN, buf, SZ_PROMPTBUF) > 0)
		    query_status = (char *) ((XINT) strlen(buf));
		else
		    query_status = NULL;
	    }

	    ip = buf;

	    /* Set o to the current value of the parameter.  Beware that some
	     * of the logical branches which follow assume that struct o has
	     * been initialized to the current value of the parameter.
	     */
	    if (pp->p_type & PT_LIST)
		setopundef (&o);
	    else if (arrflag) {
		paramget(pp, FN_VALUE);
		poffset (offset);
		o = popop();
	    } else
		o = pp->p_valo;

	    /* Handle eof, a null-length line (lone carriage return),
	     * and line with more than SZ_LINE chars.  Ignore leading whitespace
	     * if basic type is not string.
	     */
	    if (query_status == NULL) {
		/* Typing eof will use current value (as will a lone
		 * newline) but if param is a list, it is a meaningful
		 * answer.
		 */
		if (pp->p_type & PT_LIST) {
		    closelist (pp);		/* close an existing file */
		    pp->p_flags |= P_LEOF;
		    o = makeop (eofstr, OT_STRING);
		    break;
		}
		goto testval;
	    }

	    /* Ignore leading whitespace if it is not significant for this
	     * datatype.  Do this before testing for empty line, so that a
	     * return such as " \n" is equivalent to "\n".  I.e., do not
	     * penalize the user if they type the space bar by accident before
	     * typing return to accept the default value.
	     */
	    if (bastype != OT_STRING || (pp->p_type & (PT_FILNAM|PT_PSET)))
		while (*ip == ' ' || *ip == '\t')
		    ip++;

	    if (*ip == '\n') {
		/* Blank lines usually just accept the current value
		 * but if the param is a string and is undefined,
		 * it sets the string to a (defined) nullstring.
		 */
		*ip = '\0';
		if (bastype == OT_STRING && opundef (&o))
		    o = makeop (ip, bastype);
		else
		    goto testval;
	    }

	    if ((nlp = index (ip, '\n')) != NULL)
		*nlp = '\0';			/* cancel the newline	*/
	    else
		goto testval;

	    /* Finally, we have handled the pathological cases...
	     */
	    if ((pp->p_type & PT_LIST) &&
		(!strcmp (ip,eofstr) || !strcmp (ip,"eof"))) {

		closelist (pp);
		pp->p_flags |= P_LEOF;
		o = makeop (eofstr, OT_STRING);
		break;

	    } else {
		if (arrflag) {
		    /* In querying for arrays we may set more than one
		     * element of the array in a single query.  However
		     * we must set the first element.  So we will pretend
		     * to be a scalar until that first element is set
		     * and then enter a loop where we may set other
		     * elements.
		     */
		    abuf = ip;
		    ip = nextstr(&abuf, stdin);
		    if (ip == NULL  ||  ip == (char *) ERR  ||  ip == undefval)
			goto testval;
		}

		o = makeop (ip, bastype);
	    }

testval:
	    /* If parameter value is in range, we are done.  If it is out of
	     * range and we are a batch job or an interactive terminal job,
	     * print an error message and request that the user enter a legal
	     * value.  If the CL is being run taking input from a file, abort,
	     * else we will go into a loop reading illegal values from the
	     * input file and printing out lots of error messages.
	     */
	    if (inrange (pp, &o))
		break;
	    else if (batch)
		eprintf ("\n[%d] %s", bkgno, oormsg);
	    else if (isatty (fileno (stdin)))
		eprintf ("%s\n", oormsg);
	    else
		cl_error (E_UERR, oormsg);
	}

	if (!(pp->p_type & PT_LIST)) {
	    /* update param with new value.
	     */
	    if (cldebug) {
		eprintf ("changing `%s.p_val' to ", pp->p_name);
		fprop (stderr, &o);
		eprintf ("\n");
	    }

	    pushop (&o);
	    paramset (pp, FN_VALUE);
	    pp->p_flags |= P_QUERY;
	}

	pushop (&o);

	if (arrflag  &&  query_status != NULL  &&  *ip != '\0') {
	    /* If we have an array assign values until something
	     * is used up or until we hit any error.
	     */
	    n_ele = 1;
	    forever {
		if (n_ele >= max_ele)		/* End of array. */
		    break;
		ip = nextstr(&abuf, stdin);

		if (ip == NULL)			/* End of query line. */
		    break;

		if (ip == (char *) ERR) {	/* Error on query line. */
		    eprintf("Error loading array value.\n");
		    break;
		}

		if (ip != undefval) {
		    o = makeop (ip, bastype);
		    if ( ! inrange (pp, &o) ) {	/* Not in range. */
			eprintf("Array value outside range.\n");
			break;
		    }

		    offset++;			/* Next element in array. */
		    poffset (offset);

		    pushop (&o);
		    paramset (pp, FN_VALUE);
		} else
		    offset++;

		n_ele++;
	    }
	}
}
Example #9
void autoencoder_GPU::train(){
	for(int epoch = 0; epoch < nEpochNum; epoch++){
		dataprovider->reset();
		printf("Epoch %d\n", epoch + 1);
		gpu_reset(gpu_env, reset, d_error, nLayerSize0 * nVectorPerBatch, NULL);	

		for(int batch = 0; batch < nBatchNum; batch++){
			dataprovider->getNextDeviceBatch(d_layer0act);
			fprop();
			/*
			if(batch == 1){
				gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias0, CL_TRUE, 0, nLayerSize1 * sizeof(floatType), (void*)bias0, 0, NULL, NULL);
				ofstream tempStream;
				tempStream.open("../log/bias.log", ios_base::trunc);
				for(unsigned i = 0; i < nLayerSize1; i++){
					tempStream << bias0[i] << ',';
					if((i + 1) % nLayerSize1 == 0){
						tempStream << endl;
					}
				}
				tempStream.close();
			}

			if(batch == 1){
				gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight0, CL_TRUE, 0, nLayerSize0 * nLayerSize1 * sizeof(floatType), (void*)weight0, 0, NULL, NULL);
				ofstream tempStream;
				tempStream.open("../log/weight.log", ios_base::trunc);
				for(unsigned i = 0; i < nLayerSize0 * nLayerSize1; i++){
					tempStream << weight0[i] << ',';
					if((i + 1) % nLayerSize0 == 0){
						tempStream << endl;
					}
				}
				tempStream.close();
			}

			if(batch == 1){
				gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_layer1act, CL_TRUE, 0, nVectorPerBatch * nLayerSize1 * sizeof(floatType), (void*)layer1act, 0, NULL, NULL);
				ofstream tempStream;
				tempStream.open("../log/activation.log", ios_base::trunc);
				for(unsigned i = 0; i < nVectorPerBatch * nLayerSize1; i++){
					tempStream << layer1act[i] << ',';
					if((i + 1) % nVectorPerBatch == 0){
						tempStream << endl;
					}
				}
				tempStream.close();
				// exit(0);
			}
			*/

			gpu_squareError(gpu_env, squareError, d_layer8act, d_layer0act, d_error, nLayerSize0 * nVectorPerBatch);
			bprop();
			update();
		
			/*
			if(!epoch){
				double errsum = 0.0;
				gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_error, CL_TRUE, 0, nLayerSize0 * nVectorPerBatch * sizeof(floatType), (void*)error, 0, NULL, NULL);
				for(int i = 0; i < nLayerSize8 * nVectorPerBatch; i++){
					errsum += error[i];
				}
				printf("Epoch %d Batch %d Error %f\n", epoch + 1, batch + 1, errsum);
			}
			*/

		}

		double errsum = 0.0;
		gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_error, CL_TRUE, 0, nLayerSize0 * nVectorPerBatch * sizeof(floatType), (void*)error, 0, NULL, NULL);
		for(int i = 0; i < nLayerSize8 * nVectorPerBatch; i++){
			errsum += error[i];
		}
		printf("Epoch %d Error %f\n", epoch + 1, errsum);

		ofstream fout;
		fout.open("../log/errorLog.txt", ios_base::app);
		struct timeval now;
		gettimeofday(&now, NULL);
		fout << now.tv_sec << ',' << errsum << endl;
		fout.close();
	}

	ofstream fout;

	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight0, CL_TRUE, 0, nLayerSize0 * nLayerSize1 * sizeof(floatType), (void*)weight0, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight1, CL_TRUE, 0, nLayerSize1 * nLayerSize2 * sizeof(floatType), (void*)weight1, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight2, CL_TRUE, 0, nLayerSize2 * nLayerSize3 * sizeof(floatType), (void*)weight2, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight3, CL_TRUE, 0, nLayerSize3 * nLayerSize4 * sizeof(floatType), (void*)weight3, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight4, CL_TRUE, 0, nLayerSize4 * nLayerSize5 * sizeof(floatType), (void*)weight4, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight5, CL_TRUE, 0, nLayerSize5 * nLayerSize6 * sizeof(floatType), (void*)weight5, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight6, CL_TRUE, 0, nLayerSize6 * nLayerSize7 * sizeof(floatType), (void*)weight6, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_weight7, CL_TRUE, 0, nLayerSize7 * nLayerSize8 * sizeof(floatType), (void*)weight7, 0, NULL, NULL);

	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias0, CL_TRUE, 0, nLayerSize1 * sizeof(floatType), (void*)bias0, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias1, CL_TRUE, 0, nLayerSize2 * sizeof(floatType), (void*)bias1, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias2, CL_TRUE, 0, nLayerSize3 * sizeof(floatType), (void*)bias2, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias3, CL_TRUE, 0, nLayerSize4 * sizeof(floatType), (void*)bias3, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias4, CL_TRUE, 0, nLayerSize5 * sizeof(floatType), (void*)bias4, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias5, CL_TRUE, 0, nLayerSize6 * sizeof(floatType), (void*)bias5, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias6, CL_TRUE, 0, nLayerSize7 * sizeof(floatType), (void*)bias6, 0, NULL, NULL);
	gpu_env.status = clEnqueueReadBuffer(gpu_env.queue, d_bias7, CL_TRUE, 0, nLayerSize8 * sizeof(floatType), (void*)bias7, 0, NULL, NULL);

	fout.open("../data/autoencoderWeight.dat", ios_base::binary | ios_base::trunc);
	fout.write((char*)weight0, nLayerSize0 * nLayerSize1 * sizeof(floatType));
	fout.write((char*)weight1, nLayerSize1 * nLayerSize2 * sizeof(floatType));
	fout.write((char*)weight2, nLayerSize2 * nLayerSize3 * sizeof(floatType));
	fout.write((char*)weight3, nLayerSize3 * nLayerSize4 * sizeof(floatType));
	fout.write((char*)weight4, nLayerSize4 * nLayerSize5 * sizeof(floatType));
	fout.write((char*)weight5, nLayerSize5 * nLayerSize6 * sizeof(floatType));
	fout.write((char*)weight6, nLayerSize6 * nLayerSize7 * sizeof(floatType));
	fout.write((char*)weight7, nLayerSize7 * nLayerSize8 * sizeof(floatType));
	fout.close();
	fout.open("../data/autoencoderBias.dat", ios_base::binary | ios_base::trunc);
	fout.write((char*)bias0, nLayerSize1 * sizeof(floatType));
	fout.write((char*)bias1, nLayerSize2 * sizeof(floatType));
	fout.write((char*)bias2, nLayerSize3 * sizeof(floatType));
	fout.write((char*)bias3, nLayerSize4 * sizeof(floatType));
	fout.write((char*)bias4, nLayerSize5 * sizeof(floatType));
	fout.write((char*)bias5, nLayerSize6 * sizeof(floatType));
	fout.write((char*)bias6, nLayerSize7 * sizeof(floatType));
	fout.write((char*)bias7, nLayerSize8 * sizeof(floatType));
	fout.close();
		
}
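Every clEnqueueReadBuffer above stores its return code in gpu_env.status without ever inspecting it; a minimal checking helper using the standard OpenCL error convention (the usage line is hypothetical):

	#include <CL/cl.h>
	#include <cstdio>
	#include <cstdlib>

	// abort with a message unless an OpenCL call returned CL_SUCCESS
	static void checkCL(cl_int status, const char *what){
		if(status != CL_SUCCESS){
			std::fprintf(stderr, "OpenCL error %d in %s\n", (int)status, what);
			std::exit(EXIT_FAILURE);
		}
	}

	// e.g.: checkCL(clEnqueueReadBuffer(gpu_env.queue, d_weight0, CL_TRUE, 0,
	//        nLayerSize0 * nLayerSize1 * sizeof(floatType), (void*)weight0,
	//        0, NULL, NULL), "read d_weight0");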
Example #10
/*
-- mlp_forback --------------------------------------------------------
*/
int mlp_forback(
    float *stims,
    int *stimstarts,
    int ndim,
    int negs,
    int *stimoffs,
    int nin,
    int *nunits,
    int nlevels,
    int *tranfns,
    float *activs,
    float *biases,
    int ntunits,
    float *weights,
    int nweights,
    float *bschange,
    float *wtchange,
    float *etas,
    float *etbs,
    float alpha,
    float decay,
    float *targs,
    int *targstarts,
    int *targoffs,
    int nout,
    int niter,
    int nbatch,
    int ransel,
    float *err,
    float *errvar)
/* Carries out niter learning cycles on the machine, selecting
 stimuli at random from the stims and targs arrays if
 RANSEL is non-zero, otherwise taking them in sequence.
 Other parameters are as in bprop, fprop and mlp_forward, with targs
 etc. instead of outs etc.

 If nbatch is 1, then does continuous learning with momentum governed by
 alpha. If nbatch is greater than 1, then does batch learning, averaging
 errors over nbatch examples before updating. In this case, alpha is ignored.

 - one special case - if niter is 0, just do a single
 backward pass, assuming that the forward pass has already
 been carried out.

 On return activs is set to the latest error signals,
 and an explicit call of fprop is needed to get activations.

 Err returns the mean error, errvar its variance. Returns fail code. */
{
    float anegs = negs, cerr, errsum = 0.0, errsumsqu = 0.0;
    int iter, eg = -1, dofwd = niter > 0, batching = nbatch > 1, si, so;

    /* check inputs to avoid having to do so on each iteration */
    int ifail = checkns(nunits,nlevels,nin,nout,ntunits,nweights);
    if (ifail) return ifail;
    if (nbatch <= 0) return 10;

    /* When batching, niter is given as number of batches - change to
       no of egs and ensure weight change arrays are zeroed */
    if (batching) {
        niter *= nbatch;
        mlp_fillvec(0.0, wtchange, nweights);
        mlp_fillvec(0.0, bschange, ntunits);
    }

    if (niter == 0) niter = 1;  /* Always do a backward pass */

    /* Iterate */
    for (iter = 1; iter <= niter; iter++) {
        if (ndim) {
            /* stimstarts is n-D array giving limits */
            si = mlp_getsample(ndim, stimstarts, ransel);
            so = mlp_getoutsample(ndim, stimstarts, targstarts);
        } else {
            /* stimstarts 1-D array of starting points */
            if (ransel)
                eg = (int)(erand48(seed) * anegs);
            else
                eg = (eg+1) % negs;
            si = *(stimstarts+eg);
            so = *(targstarts+eg);
        }

        if (dofwd)
            fprop(stims+si, stimoffs,nin,nunits,nlevels,tranfns,activs,
                biases,ntunits,weights,nweights);

        if (batching) {
            cerr = bprop_batch(stims+si, stimoffs, nin, nunits, nlevels,
                tranfns, activs, ntunits, weights, nweights, wtchange,
                bschange, targs+so, targoffs, nout);
            if (iter % nbatch == 0)
                bwtupd_batch(biases, ntunits, weights, nweights,
                    wtchange, bschange, etas, etbs, decay, nbatch);
        }
        else
            cerr = bprop(stims+si, stimoffs, nin, nunits, nlevels, tranfns,
                activs, biases, ntunits, weights, nweights, wtchange,
                bschange, etas, etbs, alpha, decay, targs+so, targoffs,
                nout);

        errsum += cerr;
        errsumsqu += cerr * cerr;
    }

    /* Calculate the mean error and its variance over this set of trials.
       The sums are halved because bprop returns the full sum of squares,
       while learning actually uses the derivative of half the sum of squares. */
    *err = errsum/(2*niter);
    *errvar = errsumsqu/(4*niter) - *err * *err;
    return 0;
}
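The 2s and 4s in the err/errvar formulas follow from halving each returned sum of squares; the same bookkeeping made explicit (this mirrors the code above):

/* Per iteration the error wanted is e_i = cerr_i / 2, since bprop returns
   the full sum of squares; over n = niter iterations:
     mean     = (1/n) * sum(e_i)              = errsum    / (2*n)
     variance = (1/n) * sum(e_i*e_i) - mean^2 = errsumsqu / (4*n) - mean^2 */
void error_stats(
    float errsum,
    float errsumsqu,
    int niter,
    float *err,
    float *errvar)
{
    *err = errsum/(2*niter);
    *errvar = errsumsqu/(4*niter) - *err * *err;
}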
Example #11
/*
-- mlp_forward --------------------------------------------------------
*/
int mlp_forward(
    float *stims,
    int *stimstarts,
    int ndim,
    int negs,
    int *stimoffs,
    int nin,
    int *nunits,
    int nlevels,
    int *tranfns,
    float *activs,
    float *biases,
    int ntunits,
    float *weights,
    int nweights,
    float *outs,
    int *outstarts,
    int *outoffs,
    int nout)
/* Iterates fprop, storing the results from each iteration.

 Data for the i'th iteration are taken from
 stims[stimstarts[i-1]+stimoffs[0]], stims[stimstarts[i-1]+stimoffs[1]]
 ... stims[stimstarts[i-1]+stimoffs[nin-1]].

 Results are stored in a similar manner in outs.

 Arguments in between are as for fprop. */
{
    float *activout;

    /* check inputs to avoid having to do so on each iteration */
    int ifail = checkns(nunits,nlevels,nin,nout,ntunits,nweights);
    if (ifail) return ifail;

    activout = activs + ntunits - nout;

    if (ndim == 0) {
        /* The case where stimstarts is just a 1-D array of possible
            starting points. */
        int *stimstartsend = stimstarts + negs;
        /* forward propagate */
        while (stimstarts < stimstartsend) {
            fprop (stims + *stimstarts++, stimoffs, nin,
                nunits, nlevels, tranfns, activs,
                biases, ntunits, weights, nweights);
            /* copy outputs into output array (do this here rather than in
                fprop so that mlp_forback need not provide output array) */
            fcopout(activout, outs + *outstarts++, outoffs, nout);
        }
    }
    else {
        /* The case where stimstarts is an ndim-dimensional array of
            start, increment and end coordinates. */
        int eg;
        for (eg=0; eg<negs; eg++) {
            /* forward propagate */
            int s = mlp_getsample(ndim, stimstarts, 0);
            fprop (stims + s, stimoffs, nin, nunits, nlevels, tranfns, activs,
                    biases, ntunits, weights, nweights);
            s = mlp_getoutsample(ndim, stimstarts, outstarts);
            fcopout(activout, outs+s, outoffs, nout);
        }
    }
    return 0;
}
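The stimstarts/stimoffs addressing used by fprop here is a two-level gather; a minimal sketch (0-based example index) of where the i'th example's inputs come from:

#include <vector>

/* input j of example i lives at stims[stimstarts[i] + stimoffs[j]] */
std::vector<float> gather_example(
    const float *stims,
    const int *stimstarts,
    const int *stimoffs,
    int i,
    int nin)
{
    std::vector<float> x(nin);
    for (int j = 0; j < nin; ++j)
        x[j] = stims[stimstarts[i] + stimoffs[j]];
    return x;
}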