std::vector<float> computeOutputs(const std::vector<float> &xValues){
        // Input -> hidden layer.
        for(int i = 0; i < numHidden; i++){
            float sum = 0;
            for(int j = 0; j < numInput; j++){
                sum += xValues[j] * ihWeights[j][i];
            }
            sum += ihBiases[i];
            ihSums[i] = sum;
            ihOutputs[i] = activation(sum);
        }

        // Hidden -> output layer.
        std::vector<float> yValues;
        for(int i = 0; i < numOutput; i++){
            float sum = 0;
            for(int j = 0; j < numHidden; j++){
                sum += ihOutputs[j] * hoWeights[j][i];
            }
            sum += hoBiases[i];
            hoSums[i] = sum;
            yValues.push_back(activation(sum));
        }
        return yValues;
    }
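
The snippet references class state that is not shown. A minimal sketch of the surrounding class it would compile against, assuming the field names from the code and a sigmoid activation (the layout and the activation choice are assumptions, not the original source):

#include <cmath>
#include <vector>

struct NeuralNet {
    int numInput, numHidden, numOutput;
    std::vector<std::vector<float>> ihWeights; // [numInput][numHidden]
    std::vector<std::vector<float>> hoWeights; // [numHidden][numOutput]
    std::vector<float> ihBiases, hoBiases;     // one bias per node
    std::vector<float> ihSums, ihOutputs;      // hidden-layer scratch
    std::vector<float> hoSums;                 // output-layer scratch

    // Assumed activation; the original could be any sigmoid-like function.
    static float activation(float x) { return 1.0f / (1.0f + std::exp(-x)); }

    std::vector<float> computeOutputs(const std::vector<float> &xValues);
};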
Example #2
File: pvmove.c Project: andyvand/cyglvm2
static int _pvmove_target_present(struct cmd_context *cmd, int clustered)
{
	const struct segment_type *segtype;
	unsigned attr = 0;
	int found = 1;
	static int _clustered_found = -1;

	if (clustered && _clustered_found >= 0)
		return _clustered_found;

	if (!(segtype = get_segtype_from_string(cmd, "mirror")))
		return_0;

	if (activation() && segtype->ops->target_present &&
	    !segtype->ops->target_present(cmd, NULL, clustered ? &attr : NULL))
		found = 0;

	if (activation() && clustered) {
		if (found && (attr & MIRROR_LOG_CLUSTERED))
			_clustered_found = found = 1;
		else
			_clustered_found = found = 0;
	}

	return found;
}
Example #3
File: nnet.c Project: diogo149/sparsenn
/* Trains the network on one example, adjusting
 * the weights by stochastic gradient descent
 * to reduce a squared hinge loss.
 */
void train(nnet_t* n, sparse_t* v, int target){
    int i;
    /* Forward pass */
    cblas_scopy(n->hidden,n->b1,1,n->a1,1);
    for(i=0; i<v->nz; i++){
        cblas_saxpy(n->hidden, v->x[i], n->W1[v->idx[i]], 1, n->a1, 1);
    }
    activation(n->a1,n->x1,n->g1,n->hidden);
    n->a2 = n->b2 + cblas_sdot(n->hidden, n->W2, 1, n->x1, 1);
    activation(&n->a2,&n->x2,&n->g2,1);
    if(target*n->x2 > 1)
        /* Hinge loss, no error -> no need to backpropagate */
        return;
    /* Backward pass */
    n->d2 = (target-n->x2)*n->g2;
    cblas_scopy(n->hidden,n->W2,1,n->d1,1);
    for(i=0; i<n->hidden; i++)
        n->d1[i] *= n->d2*n->g1[i];
    n->b2 += n->eta*n->d2;
    cblas_saxpy(n->hidden, n->eta*n->d2, n->x1, 1, n->W2, 1);
    cblas_saxpy(n->hidden, n->eta, n->d1, 1, n->b1, 1);
    /* Sparse inputs imply sparse gradients.
     * This update saves a lot of computation
     * compared to general purpose neural net
     * implementations.
     */
    for(i=0; i<v->nz; i++){
        cblas_saxpy(n->hidden, n->eta*v->x[i], n->d1, 1, n->W1[v->idx[i]], 1);
    }
}
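
The final loop is what the sparse representation buys: the hidden pre-activation is a1 = b1 + sum_i x[i] * W1[idx[i]], so the gradient with respect to a row of W1 is proportional to that row's input and vanishes for every zero entry. A hedged sketch of the data layout these calls imply (field names are taken from the snippet; the real sparsenn headers may differ):

/* Hypothetical declarations inferred from the calls above. */
typedef struct {
    int    nz;    /* number of nonzero input entries */
    int   *idx;   /* positions of the nonzeros       */
    float *x;     /* values of the nonzeros          */
} sparse_t;

typedef struct {
    int     hidden;        /* hidden-layer width                       */
    float   eta;           /* learning rate                            */
    float **W1;            /* input->hidden weights, one row per input */
    float  *b1;            /* hidden biases                            */
    float  *W2;            /* hidden->output weights                   */
    float   b2;            /* output bias                              */
    float  *a1, *x1, *g1;  /* hidden sums, outputs, derivatives        */
    float   a2, x2, g2;    /* output sum, output, derivative           */
    float  *d1, d2;        /* backpropagated deltas                    */
} nnet_t;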
Example #4
File: nnet.c Project: diogo149/sparsenn
/* Given an input vector v, compute the output of the network. */
float value(nnet_t* n, sparse_t* v){
    int i;
    cblas_scopy(n->hidden,n->b1,1,n->a1,1);
    for(i=0; i<v->nz; i++){
        cblas_saxpy(n->hidden, v->x[i], n->W1[v->idx[i]], 1, n->a1, 1);
    }
    activation(n->a1,n->x1,n->g1,n->hidden);
    n->a2 = n->b2;
    n->a2 += cblas_sdot(n->hidden, n->W2, 1, n->x1, 1);
    activation(&n->a2,&n->x2,&n->g2,1);
    return n->x2;
}
Example #5
EigenVector MLP::modifyDelta(EigenVector const &input, EigenVector const &output, integer const & layer)
{
    EigenVector yj = input;

    if (layer == m_last)
        m_Delta[layer] =
                  activation(m_layers[layer], yj, m_derivativeActivationFunction).asDiagonal()
                * (output - activation(m_layers[layer], yj, m_activationFunction));
    else
        m_Delta[layer] =
                  activation(m_layers[layer], yj, m_derivativeActivationFunction).asDiagonal()
                * m_layers[layer+1].block(0,0,m_layers[layer+1].rows(), m_layers[layer+1].cols()-1).transpose()
                * modifyDelta(activation(m_layers[layer], yj, m_activationFunction), output,layer+1);
    return m_Delta[layer];
}
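
Reading activation(m_layers[l], y, f) as f applied to layer l's pre-activations a_l, the recursion is the standard backpropagation delta rule, with the bias column of the next layer's weight matrix dropped by the block(...) call (the notation below is mine, not from the source):

\delta_L = \operatorname{diag}\bigl(f'(a_L)\bigr)\,\bigl(t - f(a_L)\bigr),
\qquad
\delta_l = \operatorname{diag}\bigl(f'(a_l)\bigr)\,W_{l+1}^{\top}\,\delta_{l+1}

where t is the target output, L is the index of the last layer (m_last), and W_{l+1} is layer l+1's weight matrix without its bias column.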
Example #6
File: card_ll.c Project: roma-jam/scard
// Stop everything, then activate the card again.
static void scard_cold_rst(ISO7816_SC* scard) {
    deactivation(scard);
    _delay_ms(51);
    activation(scard);
    _delay_ms(251);     // Wait more than 400 clock cycles
    RST_HI();
}
Example #7
cv::Mat MultilayerPerceptron::feed_forward(cv::Mat input)
{
	float* input_ptr = input.ptr< float >(0);
	float* output_ptr = _last_results.ptr< float >(0);
	for(int i = 0; i < input.cols; ++i)
	{
		output_ptr[i] = input_ptr[i];
	}
	int* layer_ptr = _layers.ptr< int >(0);
	layer_ptr++; //Skip input layer: layer_ptr[l] is now the node count of layer l + 1.
	for(int l = 0; l < _weights.size(); ++l)
	{
		input_ptr = _last_results.ptr< float >(l);
		output_ptr = _last_results.ptr< float >(l + 1);
		float* sum_ptr = _last_sums.ptr< float >(l);
		float* bias_ptr = _biases.ptr< float >(l);
		for(int n = 0; n < layer_ptr[l]; ++n)
		{
			float* weight_ptr = _weights[l].ptr< float >(n);
			sum_ptr[n] = bias_ptr[n];
			//Use number of nodes of previous layer as number of inputs.
			for(int w = 0; w < layer_ptr[l - 1]; ++w)
			{
				sum_ptr[n] += weight_ptr[w] * input_ptr[w];
			}
			output_ptr[n] = activation(sum_ptr[n]);
		}
	}
	int num_outputs = _layers.at< int >(_layers.cols - 1);
	return _last_results.row(_last_results.rows - 1).colRange(0, num_outputs);
}
Example #8
double * currentOutput(Dataset *d,Perceptron * p){
    double * y=(double*) malloc(d->n*sizeof(double));
    int i;
    for(i=0;i<d->n;i++){
        y[i]=activation(p,d->samples[i]);
    }
    return y;
}
Example #9
int createFShader(char **ret) {
  int retLen;
  char *eq;
  createEQ(&eq);
  retLen = asprintf(ret, fragmentShader, nnData.weightsSize, activation(), gaussian, eq);
  free(eq);
  printf("%s", *ret);
  return retLen;
}
Example #10
FolderViewTreeView::FolderViewTreeView(QWidget* parent):
  QTreeView(parent),
  layoutTimer_(NULL),
  doingLayout_(false),
  activationAllowed_(true) {

  header()->setStretchLastSection(false);
  setIndentation(0);

  connect(this, SIGNAL(activated(QModelIndex)), this, SLOT(activation(QModelIndex)));
}
Example #11
// Feed ANN with input and receive output
void ANN::feedThrough(double* input, double* output) {
	copy(input, in_data_, inp_);
	in_data_[inp_] = 1;
	
	multiply(fl_, in_data_, mid_data_, hid_, inp_+1, 1);
	activation(mid_data_, hid_);
	mid_data_[hid_] = 1;

	multiply(sl_, mid_data_, output, out_, hid_+1, 1);
	activationOut(output, out_);
}
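
The helpers copy, multiply, activation, and activationOut are not shown; judging only from these call sites, their signatures are presumably along these lines (hypothetical; the real helpers may differ):

// Hypothetical signatures inferred from the calls in feedThrough.
void copy(const double *src, double *dst, int n);   // dst[0..n) = src[0..n)
void multiply(const double *M, const double *x, double *y,
              int rows, int cols, int xCols);       // y = M * x, M is rows x cols
void activation(double *v, int n);                  // in-place hidden nonlinearity
void activationOut(double *v, int n);               // in-place output nonlinearity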
Example #12
File: Neuron.cpp Project: TIXFeniks/NNSA
void Neuron::process() 
{
	summ=0;
	for (double a : inputs) {
		summ += a;
	}

	activation();
	activationDer();

	output = act;// log(1+ exp(summ));
	inputs.clear();
}
Example #13
void neuralNet::activation_approx_sse(const float* _neuronOutput, float* result)
{
	BOOST_STATIC_ASSERT(SIGMOIDCOEFFICIENT == 4.0f);
	// code adapted from http://ybeernet.blogspot.com/2011/03/speeding-up-sigmoid-function-by.html
	// approximates sigmoid function with coefficient 4.0f
	static const __m128 ones = _mm_set1_ps(1.0f);
	static const __m128 oneFourths = _mm_set1_ps(0.25f);
	static const __m128 fours = _mm_set1_ps(4.0f);

	__m128 temp;
	const __m128* vOutput = (__m128*)_neuronOutput;

	// min (output, 4.0)
	temp = _mm_min_ps(*vOutput, fours);
	// multiply by 0.25
	temp = _mm_mul_ps(temp, oneFourths);
	// 1 - ans
	temp = _mm_sub_ps(ones, temp);
	// ans^16
	temp = _mm_mul_ps(temp, temp);
	temp = _mm_mul_ps(temp, temp);
	temp = _mm_mul_ps(temp, temp);
	temp = _mm_mul_ps(temp, temp);
	// 1 + ans
	temp = _mm_add_ps(ones, temp);
	// 1 / ans
	temp = _mm_rcp_ps(temp);

#ifndef NDEBUG
	const float* _temp = (float*)&temp;
	assert(fastabs(_temp[0] - activation(_neuronOutput[0])) < 0.05f);
	assert(fastabs(_temp[1] - activation(_neuronOutput[1])) < 0.05f);
	assert(fastabs(_temp[2] - activation(_neuronOutput[2])) < 0.05f);
	assert(fastabs(_temp[3] - activation(_neuronOutput[3])) < 0.05f);
#endif

	// return ans
	_mm_store_ps(result, temp);
};
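
Both this and the scalar variant further down rest on the limit (1 - y/n)^n -> exp(-y): with n = 16 and y = 4x, the four squarings compute

\Bigl(1 - \tfrac{4x}{16}\Bigr)^{16} \approx e^{-4x}
\quad\Longrightarrow\quad
\frac{1}{1 + \bigl(1 - 0.25\,\min(x,\,4)\bigr)^{16}} \approx \frac{1}{1 + e^{-4x}}

The min(x, 4) clamp keeps the base from going negative for large inputs, and _mm_rcp_ps is only an approximate (roughly 12-bit) reciprocal, which fits the 0.05 tolerance in the asserts.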
Example #14
/*
 * Some kernels have a bug that they may leak space in the snapshot on crash.
 * If the kernel is buggy, we add some extra space.
 */
static uint64_t _cow_extra_chunks(struct cmd_context *cmd, uint64_t n_chunks)
{
	const struct segment_type *segtype;
	unsigned attrs = 0;

	if (activation() &&
	    (segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_SNAPSHOT)) &&
	    segtype->ops->target_present &&
	    segtype->ops->target_present(cmd, NULL, &attrs) &&
	    (attrs & SNAPSHOT_FEATURE_FIXED_LEAK))
		return 0;

	return (n_chunks + 63) / 64;
}
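
The fallback path reserves one extra chunk per 64 data chunks, rounded up. A few worked values (hypothetical inputs, shown only to illustrate the arithmetic):

/* When the kernel lacks SNAPSHOT_FEATURE_FIXED_LEAK: */
/* n_chunks =  1 -> ( 1 + 63) / 64 = 1 extra chunk  */
/* n_chunks = 64 -> (64 + 63) / 64 = 1 extra chunk  */
/* n_chunks = 65 -> (65 + 63) / 64 = 2 extra chunks */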
Example #15
SysTray::SysTray(Playground *playground)
    : QSystemTrayIcon(playground)
{
  this->playground = playground;
  this->prefsWindow = new PrefsWindow(playground, playground); //Why isn't QSystemTray a QWidget?
  setIcon(QIcon(":/box/box.svg"));
  
  //Quit Action
  quitAction = new QAction(tr("&Quit"), this);
  connect(quitAction, SIGNAL(triggered()), qApp, SLOT(quit()));
  
  //Clear Action
  clearAction = new QAction(tr("C&lear"), this);
  connect(clearAction, SIGNAL(triggered()), playground, SLOT(clear()));
  
  //Show Preferences Window
  prefsAction = new QAction(tr("&Preferences"), this);
  connect(prefsAction, SIGNAL(triggered()), prefsWindow, SLOT(showPreferences()));
  
  //Group of shape actions
  shapeActions = new QActionGroup(this);
     
  //Drop circle - enabled by default
  dropCircleAction = new QAction(tr("&Circles"), shapeActions);
  dropCircleAction->setCheckable(true);
  dropCircleAction->setChecked(true);
  connect(dropCircleAction, SIGNAL(triggered()), this, SLOT(setCircles()));
  setCircles();
  
  //Drop boxes
  dropSquareAction = new QAction(tr("&Squares"), shapeActions);
  dropSquareAction->setCheckable(true);
  connect(dropSquareAction, SIGNAL(triggered()), this, SLOT(setSquares()));

  //Build the menu
  trayIconMenu = new QMenu(playground);
  trayIconMenu->addAction(dropSquareAction);
  trayIconMenu->addAction(dropCircleAction);
  trayIconMenu->addSeparator();
  trayIconMenu->addAction(clearAction);
  trayIconMenu->addAction(prefsAction);
  trayIconMenu->addSeparator();
  trayIconMenu->addAction(quitAction);
  setContextMenu(trayIconMenu);
  
  //Tells us if a mouse click on the tray necessitates block creation
  connect(this, SIGNAL(activated(QSystemTrayIcon::ActivationReason)), this, SLOT(activation(QSystemTrayIcon::ActivationReason)));
}
Example #16
EigenMatrix MLP::run(const integer &exampleIndex, integer layer)
// computes the output associated with the matrix "m_input" up to layer number "layer"
{
    EigenMatrix output;
    if (exampleIndex == -1)
        output = m_input;
    else
        output = m_input.col(exampleIndex);

    if (layer < 0)
        layer = m_last;

    for(integer j = 0; j <= layer; ++j)
        output = activation(m_layers[j], output, m_activationFunction);
    return output;
}
Example #17
void neuralNet::activation_approx(const float* _neuronOutput, float* result)
{
	BOOST_STATIC_ASSERT(SIGMOIDCOEFFICIENT == 4.0f);
	// code from http://ybeernet.blogspot.com/2011/03/speeding-up-sigmoid-function-by.html
	// approximates sigmoid function with coefficient 4.0f
	float tmp = std::min(*_neuronOutput, 4.0f);
	tmp = 1.0f - 0.25f * tmp;
	tmp *= tmp;
	tmp *= tmp;
	tmp *= tmp;
	tmp *= tmp;
	tmp = 1.0f / (1.0f + tmp);

	assert(fastabs(tmp - activation(*_neuronOutput)) < 0.05f);

	// return ans
	*result = tmp;
};
Example #18
void transparent_transission_receive(unsigned char *buf,unsigned char len)
{
    unsigned char cmd;
    cmd = buf[0];

   // printf("[recv_data],send len %d data %s\n", len, buf);

    switch(cmd)
    {
        case 'a':   
            activation();
            break;
        case 'b':
            run_flag = true;
            break;
        case 'c':
            run_flag = false;
            break;
        default:
            break;
    }
}
Example #19
void MainViewWidget::slotActivateAll()
{
	activation(FMFontDb::DB()->getFilteredFonts(), true);
}
Example #20
void MainViewWidget::slotDesactivateAll()
{
	activation(FMFontDb::DB()->getFilteredFonts(), false);
}
Example #21
FolderViewListView::FolderViewListView(QWidget* parent):
  QListView(parent),
  activationAllowed_(true) {
  connect(this, SIGNAL(activated(QModelIndex)), this, SLOT(activation(QModelIndex)));
}
Example #22
static int _vgrename_single(struct cmd_context *cmd, const char *vg_name,
			    struct volume_group *vg, struct processing_handle *handle)
{
	struct vgrename_params *vp = (struct vgrename_params *) handle->custom_handle;
	struct lvmcache_vginfo *vginfo;
	char old_path[NAME_LEN];
	char new_path[NAME_LEN];
	struct id id;
	const char *name;
	char *dev_dir;

	/*
	 * vg_name_old may be a UUID which process_each_vg
	 * replaced with the real VG name.  In that case,
	 * vp->vg_name_old will be the UUID and vg_name will be
	 * the actual VG name.  Check again if the old and new
	 * names match, using the real names.
	 */
	if (vp->old_name_is_uuid && !strcmp(vp->vg_name_new, vg_name)) {
		log_error("New VG name must differ from the old VG name.");
		return ECMD_FAILED;
	}

	/*
	 * Check if a VG already exists with the new VG name.
	 *
	 * When not using lvmetad, it's essential that a full scan has
	 * been done to ensure we see all existing VG names, so we
	 * do not use an existing name.  This has been done by
	 * process_each_vg REQUIRES_FULL_LABEL_SCAN.
	 *
	 * (FIXME: We could look for the new name in the list of all
	 * VGs that process_each_vg created, but we don't have access
	 * to that list here, so we have to look in lvmcache.
	 * This requires populating lvmcache when using lvmetad.)
	 */
	lvmcache_seed_infos_from_lvmetad(cmd);

	if ((vginfo = lvmcache_vginfo_from_vgname(vp->vg_name_new, NULL))) {
		log_error("New VG name \"%s\" already exists", vp->vg_name_new);
		return ECMD_FAILED;
	}

	if (id_read_format_try(&id, vp->vg_name_new) &&
	    (name = lvmcache_vgname_from_vgid(cmd->mem, (const char *)&id))) {
		log_error("New VG name \"%s\" matches the UUID of existing VG %s", vp->vg_name_new, name);
		return ECMD_FAILED;
	}

	/*
	 * Lock the old VG name first:
	 * . The old VG name has already been locked by process_each_vg.
	 * . Now lock the new VG name here, second.
	 *
	 * Lock the new VG name first:
	 * . The new VG name has already been pre-locked below,
	 *   before process_each_vg was called.
	 * . process_each_vg then locked the old VG name second.
	 * . Nothing to do here.
	 *
	 * Special case when the old VG name is a uuid:
	 * . The old VG's real name wasn't known before process_each_vg,
	 *   so the correct lock ordering wasn't known beforehand,
	 *   so no pre-locking was done.
	 * . The old VG's real name has been locked by process_each_vg.
	 * . Now lock the new VG name here, second.
	 * . Suppress lock ordering checks because the lock order may
	 *   have wanted the new name first, which wasn't possible in
	 *   this uuid-for-name case.
	 */
	if (vp->lock_vg_old_first || vp->old_name_is_uuid) {
		if (vp->old_name_is_uuid)
			lvmcache_lock_ordering(0);

		if (!_lock_new_vg_for_rename(cmd, vp->vg_name_new))
			return ECMD_FAILED;

		lvmcache_lock_ordering(1);
	}

	dev_dir = cmd->dev_dir;

	if (!archive(vg))
		goto error;

	/* Remove references based on old name */
	if (!drop_cached_metadata(vg))
		stack;

	if (!lockd_rename_vg_before(cmd, vg)) {
		stack;
		goto error;
	}

	/* Change the volume group name */
	vg_rename(cmd, vg, vp->vg_name_new);

	/* store it on disks */
	log_verbose("Writing out updated volume group");
	if (!vg_write(vg) || !vg_commit(vg)) {
		goto error;
	}

	sprintf(old_path, "%s%s", dev_dir, vg_name);
	sprintf(new_path, "%s%s", dev_dir, vp->vg_name_new);

	if (activation() && dir_exists(old_path)) {
		log_verbose("Renaming \"%s\" to \"%s\"", old_path, new_path);

		if (test_mode())
			log_verbose("Test mode: Skipping rename.");

		else if (lvs_in_vg_activated(vg)) {
			if (!vg_refresh_visible(cmd, vg)) {
				log_error("Renaming \"%s\" to \"%s\" failed", 
					old_path, new_path);
				goto error;
			}
		}
	}

	lockd_rename_vg_final(cmd, vg, 1);

	if (!backup(vg))
		stack;
	if (!backup_remove(cmd, vg_name))
		stack;

	unlock_vg(cmd, vp->vg_name_new);
	vp->unlock_new_name = 0;

	log_print_unless_silent("Volume group \"%s\" successfully renamed to \"%s\"",
				vp->vg_name_old, vp->vg_name_new);
	return 1;

 error:
	unlock_vg(cmd, vp->vg_name_new);
	vp->unlock_new_name = 0;

	lockd_rename_vg_final(cmd, vg, 0);

	return 0;
}
Example #23
void Player::update()
{
	float maxLadderSpeed = 140;

	//Movement

	chr.update();
	this->x = chr.getX();
	this->y = chr.getY();

	//Ladders
	bool isLadder = false;
	if(world->isLadder(chr.getFeetX(), chr.getFeetY()))
	{
		isLadder = true;
	}

	//Jump
	if(Input::up() && (lastJump + 0.1f < globaltime || isLadder == true))
	{
		if(chr.checkOnGround(world))
		{
			chr.jump(-1500);

			lastJump = globaltime;
		}
		if(isLadder)
		{
			chr.setVelocityY(-maxLadderSpeed);
		}
	}

	float targetSpeed = 0;
	if(Input::left())
	{
		targetSpeed = -speed;
	}
	if(Input::right())
	{
		targetSpeed = speed;
	}

	chr.targetSpeed(targetSpeed);

	activation(); // Looks through all the parts to find nearby usable parts.

	//Positioning the camera
	cameraX = x - (agk::GetVirtualWidth() / agk::GetViewZoom() / 2);
	cameraY = y - (agk::GetVirtualHeight() / agk::GetViewZoom() / 2) - 200;

	agk::SetViewOffset(cameraX, cameraY);

	PathLink* closestLink = this->world->findClosestLink(x, y);
	if(closestLink != NULL)
	{
		agk::Print(closestLink->getID());
	}

	//Updating platforms
	world->updatePlrFeet(chr.getFeetX(), chr.getFeetY());

	if(isLadder == true)
	{
		chr.capYVelocity(maxLadderSpeed, -maxLadderSpeed);
	}
}
Example #24
inline void layer::calc_outputs(VectorXd &inputs)
{
    // Affine transform: every column but the last multiplies the inputs,
    // the last column carries the bias weights.
    outputs = weights.leftCols(weights.cols() - 1) * inputs;
    outputs += weights.rightCols(1) * bias;
    activation(outputs); // elementwise nonlinearity, applied in place
}
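
For context, a minimal sketch of a class this member function would compile against, assuming Eigen and an in-place sigmoid (the weight layout, with bias weights in the last column, is taken from the code; everything else is an assumption):

#include <Eigen/Dense>
using Eigen::MatrixXd;
using Eigen::VectorXd;

struct layer {
    MatrixXd weights;     // one row per node; last column holds the bias weights
    double   bias = 1.0;  // constant bias input
    VectorXd outputs;

    inline void calc_outputs(VectorXd &inputs);

    // Assumed in-place sigmoid; the original activation is not shown.
    static void activation(VectorXd &v) {
        v = ((-v.array()).exp() + 1.0).inverse().matrix();
    }
};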