bool ExpandableBlockStreamExchangeMaterialized::open(const PartitionOffset& part_off){
	if(sem_open_.try_wait()){
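		// The first thread to pass sem_open_ wins and performs the actual initialization.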
		nexhausted_lowers_=0;
		received_block_stream_=BlockStreamBase::createBlock(state_.schema_,state_.block_size_);
		block_for_socket_=new BlockReadableFix(received_block_stream_->getSerializedBlockSize(),state_.schema_);
		buffer=new BlockStreamBuffer(state_.block_size_,nlowers_*10,state_.schema_);
		if(PrepareTheSocket()==false)
			return false;

		if(RegisterExchange(ExchangeID(state_.exchange_id_,0))==false){
			Logging_ExpandableBlockStreamExchangeMaterialized("Register Exchange with ID=%l fails!",state_.exchange_id_);
		}

		if(isMaster()){
			Logging_ExpandableBlockStreamExchangeMaterialized("This exchange is the master one, serialize the iterator subtree to the children...");
			if(SerializeAndSendToMulti()==false)
				return false;
		}

		if(WaitForConnectionFromLowerExchanges()==false){
			return false;
		}

		if(pthread_create(&receiver_tid,NULL,receiver,this)!=0){
			Logging_ExpandableBlockStreamExchangeMaterialized("Failed to create receiver thread.");
			return false;
		}
		open_finished_=true;
		return true;

	}
	else{
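		// Threads that lose the race wait here until the winner has finished opening.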
		while(!open_finished_){
			usleep(1);
		}
		return true;
	}

}
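The open() above uses a first-caller-wins pattern: the thread that gets sem_open_ builds the buffers, sockets and receiver thread, while every other thread spins on open_finished_ until the winner is done. A minimal, self-contained sketch of the same idea in standard C++ (std::atomic standing in for the project's semaphore type; doSetup is a placeholder, not part of the original class) might look like this:

#include <atomic>
#include <chrono>
#include <thread>

class SerializedOpen {
public:
	bool open() {
		if (!claimed_.exchange(true)) {                // first caller wins the race
			bool ok = doSetup();                   // sockets, buffers, receiver thread...
			result_.store(ok, std::memory_order_release);
			finished_.store(true, std::memory_order_release);
			return ok;
		}
		while (!finished_.load(std::memory_order_acquire))
			std::this_thread::sleep_for(std::chrono::microseconds(1));  // mirrors usleep(1)
		return result_.load(std::memory_order_acquire);
	}

private:
	bool doSetup() { return true; }                        // placeholder for the real work
	std::atomic<bool> claimed_{false};
	std::atomic<bool> finished_{false};
	std::atomic<bool> result_{false};
};

Note that in the original, a failed setup returns false without ever setting open_finished_, so threads on the waiting path would spin indefinitely; the sketch publishes a result either way so that waiters can return.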
Example #2
0
void Object::notifyDetach()
{
    if( !isMaster( ))
        return;

    // unmap slaves
    const Nodes slaves = _cm->getSlaveNodes();
    if( slaves.empty( ))
        return;

    EQWARN << slaves.size() << " slaves subscribed during deregisterObject of "
           << base::className( this ) << " id " << _id << std::endl;

    NodeUnmapObjectPacket packet;
    packet.objectID = _id;

    for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i )
    {
        NodePtr node = *i;
        node->send( packet );
    }
}
Example #3
0
bool StyleEngine::updateActiveStyleSheets(StyleResolverUpdateMode updateMode)
{
    ASSERT(isMaster());
    ASSERT(!m_document.inStyleRecalc());

    if (!m_document.isActive())
        return false;

    bool requiresFullStyleRecalc = false;
    if (m_documentScopeDirty || updateMode == FullStyleUpdate)
        requiresFullStyleRecalc = m_documentStyleSheetCollection.updateActiveStyleSheets(this, updateMode);

    if (shouldUpdateShadowTreeStyleSheetCollection(updateMode)) {
        TreeScopeSet treeScopes = updateMode == FullStyleUpdate ? m_activeTreeScopes : m_dirtyTreeScopes;
        HashSet<TreeScope*> treeScopesRemoved;

        for (TreeScopeSet::iterator it = treeScopes.begin(); it != treeScopes.end(); ++it) {
            TreeScope* treeScope = *it;
            ASSERT(treeScope != m_document);
            ShadowTreeStyleSheetCollection* collection = static_cast<ShadowTreeStyleSheetCollection*>(styleSheetCollectionFor(*treeScope));
            ASSERT(collection);
            collection->updateActiveStyleSheets(this, updateMode);
            if (!collection->hasStyleSheetCandidateNodes())
                treeScopesRemoved.add(treeScope);
        }
        if (!treeScopesRemoved.isEmpty())
            for (HashSet<TreeScope*>::iterator it = treeScopesRemoved.begin(); it != treeScopesRemoved.end(); ++it)
                m_activeTreeScopes.remove(*it);
    }

    InspectorInstrumentation::activeStyleSheetsUpdated(&m_document);
    m_usesRemUnits = m_documentStyleSheetCollection.usesRemUnits();

    m_dirtyTreeScopes.clear();
    m_documentScopeDirty = false;

    return requiresFullStyleRecalc;
}
Example #4
0
bool Score::saveFile(QIODevice* f, bool msczFormat, bool onlySelection)
      {
      XmlWriter xml(this, f);
      xml.setWriteOmr(msczFormat);
      xml.header();
      if (!MScore::testMode) {
            xml.stag("museScore version=\"" MSC_VERSION "\"");
            xml.tag("programVersion", VERSION);
            xml.tag("programRevision", revision);
            }
      else
            xml.stag("museScore version=\"3.01\"");
      write(xml, onlySelection);
      xml.etag();
      if (isMaster())
            masterScore()->revisions()->write(xml);
      if (!onlySelection) {
            //update version values for i.e. plugin access
            _mscoreVersion = VERSION;
            _mscoreRevision = revision.toInt(0, 16);
            _mscVersion = MSCVERSION;
            }
      return true;
      }
Example #5
0
void SketchingTool::saveCurvesCallback(Misc::CallbackData* cbData)
	{
	if(isMaster())
		{
		try
			{
			/* Save all curves to a curve file: */
			Misc::File curveFile(Misc::createNumberedFileName(factory->curveFileName,4).c_str(),"w");
			FILE* cf=curveFile.getFilePtr();
			
			/* Write the curve file header: */
			fprintf(cf,"Vrui Curve Editor Tool Curve File\n");
			
			/* Write all curves: */
			fprintf(cf,"%u\n",(unsigned int)curves.size());
			for(std::vector<Curve*>::const_iterator cIt=curves.begin();cIt!=curves.end();++cIt)
				{
				const Curve* c=*cIt;
				
				/* Write the curve's line width and color: */
				fprintf(cf,"\n");
				fprintf(cf,"%4.1f, %03u %03u %03u\n",c->lineWidth,c->color[0],c->color[1],c->color[2]);
				
				/* Write the curve's control points: */
				fprintf(cf,"%u\n",(unsigned int)c->controlPoints.size());
				for(std::vector<Curve::ControlPoint>::const_iterator cpIt=c->controlPoints.begin();cpIt!=c->controlPoints.end();++cpIt)
					fprintf(cf,"%f, %f %f %f\n",cpIt->t,cpIt->pos[0],cpIt->pos[1],cpIt->pos[2]);
				}
			}
		catch(const std::runtime_error& err)
			{
			/* Show an error message: */
			showErrorMessage("Curve Editor","Could not create curve file; did not save curves");
			}
		}
	}
Example #6
0
    bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

        // Steps
        // 1. check basic config
        // 2. extract params from command
        // 3. fast check
        // 4. slow check (LOCKS)

        // step 1

        lastError.disableForCommand();
        ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

        // make sure we have the mongos id for writebacks
        if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
            return false;

        bool authoritative = cmdObj.getBoolField( "authoritative" );

        // check config server is ok or enable sharding
        if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
            return false;

        // check shard name/hosts are correct
        if ( cmdObj["shard"].type() == String ) {
            shardingState.gotShardName( cmdObj["shard"].String() );
            shardingState.gotShardHost( cmdObj["shardHost"].String() );
        }


        // Handle initial shard connection
        if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
            result.append( "initialized", true );
            return true;
        }

        // we can run on a slave up to here
        if ( ! isMaster( "admin" ) ) {
            result.append( "errmsg" , "not master" );
            result.append( "note" , "from post init in setShardVersion" );
            return false;
        }

        // step 2

        string ns = cmdObj["setShardVersion"].valuestrsafe();
        if ( ns.size() == 0 ) {
            errmsg = "need to specify namespace";
            return false;
        }

        const ConfigVersion version = ConfigVersion( extractVersion( cmdObj["version"] , errmsg ), OID() );
        if ( errmsg.size() )
            return false;

        // step 3

        const ConfigVersion oldVersion = info->getVersion(ns);
        const ConfigVersion globalVersion = shardingState.getVersion(ns);

        oldVersion.addToBSON( result, "oldVersion" );

        if ( globalVersion.isSet() && version.isSet() ) {
            // this means there is no reset in progress on either side
            // so it's safe to make some assumptions

            if ( version.isEquivalentTo( globalVersion ) ) {
                // mongos and mongod agree!
                if ( ! oldVersion.isEquivalentTo( version ) ) {
                    if ( oldVersion < globalVersion ) {
                        info->setVersion( ns , version );
                    }
                    else if ( authoritative ) {
                        // this means there was a drop and our version is reset
                        info->setVersion( ns , version );
                    }
                    else {
                        result.append( "ns" , ns );
                        result.appendBool( "need_authoritative" , true );
                        errmsg = "verifying drop on '" + ns + "'";
                        return false;
                    }
                }
                return true;
            }

        }

        // step 4

        // this is because of a weird segfault I saw and I can't see why this should ever be set
        massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

        Lock::GlobalWrite setShardVersionLock; // TODO: can we get rid of this??

        if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
            // this had been reset
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
        }

        if ( ! version.isSet() && ! globalVersion.isSet() ) {
            // this connection is cleaning itself
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( ! version.isSet() && globalVersion.isSet() ) {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                result.append( "ns" , ns );
                globalVersion.addToBSON( result, "globalVersion" );
                errmsg = "dropping needs to be authoritative";
                return false;
            }
            log() << "wiping data for: " << ns << endl;
            globalVersion.addToBSON( result, "beforeDrop" );
            // only setting global version on purpose
            // need clients to re-find meta-data
            shardingState.resetVersion( ns );
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( version < oldVersion ) {
            errmsg = "this connection already had a newer version of collection '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "newVersion" );
            globalVersion.addToBSON( result, "globalVersion" );
            return false;
        }

        if ( version < globalVersion ) {
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section" << endl;
            }
            errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "version" );
            globalVersion.addToBSON( result, "globalVersion" );
            result.appendBool( "reloadConfig" , true );
            return false;
        }

        if ( ! globalVersion.isSet() && ! authoritative ) {
            // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
            // should require a reload.
            // TODO: Maybe a more elegant way of doing this
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section for version reset" << endl;
            }

            // need authoritative for first look
            result.append( "ns" , ns );
            result.appendBool( "need_authoritative" , true );
            errmsg = "first time for collection '" + ns + "'";
            return false;
        }

        Timer relockTime;
        {
            dbtemprelease unlock;

            ShardChunkVersion currVersion = version;
            if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
                errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
                result.append( "ns" , ns );
                version.addToBSON( result, "version" );
                globalVersion.addToBSON( result, "globalVersion" );
                return false;
            }
        }
        if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
            log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
        }

        info->setVersion( ns , version );
        return true;
    }
Example #7
0
        bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

            // Steps
            // 1. check basic config
            // 2. extract params from command
            // 3. fast check
            // 4. slow check (LOCKS)
            
            // step 1

            lastError.disableForCommand();
            ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

            // make sure we have the mongos id for writebacks
            if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) ) 
                return false;

            bool authoritative = cmdObj.getBoolField( "authoritative" );
            
            // check config server is ok or enable sharding
            if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
                return false;

            // check shard name/hosts are correct
            if ( cmdObj["shard"].type() == String ) {
                shardingState.gotShardName( cmdObj["shard"].String() );
            }
            
            // Handle initial shard connection
            if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ){

                result.append( "initialized", true );

                // Send back wire version to let mongos know what protocol we can speak
                result.append( "minWireVersion", minWireVersion );
                result.append( "maxWireVersion", maxWireVersion );

                return true;
            }

            // we can run on a slave up to here
            if ( ! isMaster( "admin" ) ) {
                result.append( "errmsg" , "not master" );
                result.append( "note" , "from post init in setShardVersion" );
                return false;
            }

            // step 2
            
            string ns = cmdObj["setShardVersion"].valuestrsafe();
            if ( ns.size() == 0 ) {
                errmsg = "need to specify namespace";
                return false;
            }

            if( ! ChunkVersion::canParseBSON( cmdObj, "version" ) ){
                errmsg = "need to specify version";
                return false;
            }

            const ChunkVersion version = ChunkVersion::fromBSON( cmdObj, "version" );
            
            // step 3

            const ChunkVersion oldVersion = info->getVersion(ns);
            const ChunkVersion globalVersion = shardingState.getVersion(ns);

            oldVersion.addToBSON( result, "oldVersion" );
            
            if ( globalVersion.isSet() && version.isSet() ) {
                // this means there is no reset in progress on either side
                // so it's safe to make some assumptions

                if ( version.isWriteCompatibleWith( globalVersion ) ) {
                    // mongos and mongod agree!
                    if ( ! oldVersion.isWriteCompatibleWith( version ) ) {
                        if ( oldVersion < globalVersion &&
                             oldVersion.hasCompatibleEpoch(globalVersion) )
                        {
                            info->setVersion( ns , version );
                        }
                        else if ( authoritative ) {
                            // this means there was a drop and our version is reset
                            info->setVersion( ns , version );
                        }
                        else {
                            result.append( "ns" , ns );
                            result.appendBool( "need_authoritative" , true );
                            errmsg = "verifying drop on '" + ns + "'";
                            return false;
                        }
                    }
                    return true;
                }
                
            }

            // step 4
            
            // this is because of a weird segfault I saw and I can't see why this should ever be set
            massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 ); 
        
            if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
                // this had been reset
                info->setVersion( ns , ChunkVersion( 0, OID() ) );
            }

            if ( ! version.isSet() && ! globalVersion.isSet() ) {
                // this connection is cleaning itself
                info->setVersion( ns , ChunkVersion( 0, OID() ) );
                return true;
            }

            // Cases below all either return OR fall-through to remote metadata reload.
            if ( version.isSet() || !globalVersion.isSet() ) {

                // Not Dropping

                // TODO: Refactor all of this
                if ( version < oldVersion && version.hasCompatibleEpoch( oldVersion ) ) {
                    errmsg = "this connection already had a newer version of collection '" + ns + "'";
                    result.append( "ns" , ns );
                    version.addToBSON( result, "newVersion" );
                    globalVersion.addToBSON( result, "globalVersion" );
                    return false;
                }

                // TODO: Refactor all of this
                if ( version < globalVersion && version.hasCompatibleEpoch( globalVersion ) ) {
                    while ( shardingState.inCriticalMigrateSection() ) {
                        log() << "waiting till out of critical section" << endl;
                        shardingState.waitTillNotInCriticalSection( 10 );
                    }
                    errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
                    result.append( "ns" , ns );
                    version.addToBSON( result, "version" );
                    globalVersion.addToBSON( result, "globalVersion" );
                    result.appendBool( "reloadConfig" , true );
                    return false;
                }

                if ( ! globalVersion.isSet() && ! authoritative ) {
                    // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
                    // should require a reload.
                    while ( shardingState.inCriticalMigrateSection() ) {
                        log() << "waiting till out of critical section" << endl;
                        shardingState.waitTillNotInCriticalSection( 10 );
                    }

                    // need authoritative for first look
                    result.append( "ns" , ns );
                    result.appendBool( "need_authoritative" , true );
                    errmsg = "first time for collection '" + ns + "'";
                    return false;
                }

                // Fall through to metadata reload below
            }
            else {

                // Dropping

                if ( ! authoritative ) {
                    result.appendBool( "need_authoritative" , true );
                    result.append( "ns" , ns );
                    globalVersion.addToBSON( result, "globalVersion" );
                    errmsg = "dropping needs to be authoritative";
                    return false;
                }

                // Fall through to metadata reload below
            }

            ChunkVersion currVersion;
            Status status = shardingState.refreshMetadataIfNeeded( ns, version, &currVersion );

            if (!status.isOK()) {

                // The reload itself was interrupted or confused here

                errmsg = str::stream() << "could not refresh metadata for " << ns
                                       << " with requested shard version " << version.toString()
                                       << ", stored shard version is " << currVersion.toString()
                                       << causedBy( status.reason() );

                warning() << errmsg << endl;

                result.append( "ns" , ns );
                version.addToBSON( result, "version" );
                currVersion.addToBSON( result, "globalVersion" );
                result.appendBool( "reloadConfig", true );

                return false;
            }
            else if ( !version.isWriteCompatibleWith( currVersion ) ) {

                // We reloaded a version that doesn't match the version mongos was trying to
                // set.

                errmsg = str::stream() << "requested shard version differs from"
                                       << " config shard version for " << ns
                                       << ", requested version is " << version.toString()
                                       << " but found version " << currVersion.toString();

                OCCASIONALLY warning() << errmsg << endl;

                // WARNING: the exact fields below are important for compatibility with mongos
                // version reload.

                result.append( "ns" , ns );
                currVersion.addToBSON( result, "globalVersion" );

                // If this was a reset of a collection or the last chunk moved out, inform mongos to
                // do a full reload.
                if (currVersion.epoch() != version.epoch() || !currVersion.isSet() ) {
                    result.appendBool( "reloadConfig", true );
                    // Zero-version also needed to trigger full mongos reload, sadly
                    // TODO: Make this saner, and less impactful (full reload on last chunk is bad)
                    ChunkVersion( 0, 0, OID() ).addToBSON( result, "version" );
                    // For debugging
                    version.addToBSON( result, "origVersion" );
                }
                else {
                    version.addToBSON( result, "version" );
                }

                return false;
            }

            info->setVersion( ns , version );
            return true;
        }
Example #8
0
void Foam::processorTetPolyPatchFaceDecomp::calcMeshPoints() const
{
    if (meshPointsPtr_)
    {
        FatalErrorIn
        (
            "void processorTetPolyPatchFaceDecomp::calcMeshPoints() const"
        )   << "meshPointsPtr_ already allocated"
            << abort(FatalError);
    }

    // Algorithm:
    // Depending on whether the patch is a master or a slave, get the primitive
    // patch points and filter away the points from the global patch.

    labelList mp(0);

    if (isMaster())
    {
        mp = procPolyPatch_.meshPoints();
    }
    else
    {
        // Slave side.  Create the reversed patch and pick up its points
        // so that the order is correct
        const polyPatch& pp = patch();

        faceList masterFaces(pp.size());

        forAll (pp, faceI)
        {
            masterFaces[faceI] = pp[faceI].reverseFace();
        }

        mp = primitiveFacePatch
            (
                masterFaces,
                pp.points()
            ).meshPoints();
    }

    // Get reference to shared processor points
    const labelList& sharedPoints =
        boundaryMesh().globalPatch().meshPoints();

    // Filter the shared points out of the list
    meshPointsPtr_ = new labelList(mp.size() + procPolyPatch_.size());
    labelList& filtPoints = *meshPointsPtr_;

    label noFiltPoints = 0;

    forAll (mp, pointI)
    {
        label curP = mp[pointI];

        bool found = false;
        forAll (sharedPoints, sharedI)
        {
            if (sharedPoints[sharedI] == curP)
            {
                found = true;
                break;
            }
        }

        if (!found)
        {
            filtPoints[noFiltPoints] = curP;
            noFiltPoints++;
        }
    }

    // Trim the over-allocated list to the number of filtered points
    filtPoints.setSize(noFiltPoints);
}
Example #9
0
void StyleEngine::notifyPendingStyleSheetAdded()
{
    ASSERT(isMaster());
    m_pendingStylesheets++;
}
Example #10
0
result_t loadScanConfigFile(MessageMap* messages, unsigned char address, SymbolString& data, string& relativeFile)
{
	PartType partType;
	if (isMaster(address)) {
		address = (unsigned char)(data[0]+5); // slave address of sending master
		partType = pt_masterData;
		if (data.size()<5+1+5+2+2) // skip QQ ZZ PB SB NN
			return RESULT_EMPTY;
	} else {
		partType = pt_slaveData;
		if (data.size()<1+1+5+2+2) // skip NN
			return RESULT_EMPTY;
	}
	DataFieldSet* identFields = DataFieldSet::getIdentFields();
	// MANUFACTURER/ZZ. ( C.S.H., C.H., C.S., S.H., C., S., H., "" ) .*csv
	string path, prefix, ident, sw, hw; // path: cfgpath/MANUFACTURER, prefix: ZZ., ident: C[C[C[C[C]]]], sw: xxxx, hw: xxxx
	ostringstream out;
	unsigned char offset = 0;
	size_t field = 0;
	result_t result = (*identFields)[field]->read(partType, data, offset, out, 0); // manufacturer name
	if (result==RESULT_ERR_NOTFOUND)
		result = (*identFields)[field]->read(partType, data, offset, out, OF_NUMERIC); // manufacturer name
	if (result==RESULT_OK) {
		path = out.str();
		transform(path.begin(), path.end(), path.begin(), ::tolower);
		path = string(opt.configPath) + "/" + path;
		out.str("");
		out << setw(2) << hex << setfill('0') << nouppercase << static_cast<unsigned>(address) << ".";
		prefix = out.str();
		out.str("");
		out.clear();
		offset = (unsigned char)(offset+(*identFields)[field++]->getLength(partType));
		result = (*identFields)[field]->read(partType, data, offset, out, 0); // identification string
	}
	if (result==RESULT_OK) {
		ident = out.str();
		out.str("");
		offset = (unsigned char)(offset+(*identFields)[field++]->getLength(partType));
		result = (*identFields)[field]->read(partType, data, offset, out, 0); // software version number
	}
	if (result==RESULT_OK) {
		sw = out.str();
		out.str("");
		offset = (unsigned char)(offset+(*identFields)[field++]->getLength(partType));
		result = (*identFields)[field]->read(partType, data, offset, out, 0); // hardware version number
	}
	if (result!=RESULT_OK) {
		logDebug(lf_main, "load scan config files: %s", getResultCode(result));
		return result;
	}
	vector<string> files;
	bool hasTemplates = false;
	if (result==RESULT_OK) {
		hw = out.str();
		result = collectConfigFiles(path, prefix, ".csv", files, NULL, &hasTemplates);
	}
	logDebug(lf_main, "found %d matching scan config files from %s with prefix %s: %s", files.size(), path.c_str(), prefix.c_str(), getResultCode(result));
	if (result!=RESULT_OK)
		return result;
	if (files.empty())
		return RESULT_ERR_NOTFOUND;

	// complete name: cfgpath/MANUFACTURER/ZZ[.C[C[C[C[C]]]]][.SWxxxx][.HWxxxx][.*].csv
	for (string::iterator it = ident.begin(); it!=ident.end(); it++) {
		if (::isspace(*it)) {
			ident.erase(it--);
		} else {
			*it = (char)::tolower(*it);
		}
	}
	size_t prefixLen = path.length()+1+prefix.length()-1;
	size_t bestMatch = 0;
	string best;
	for (vector<string>::iterator it = files.begin(); it!=files.end(); it++) {
		string name = *it;
		name = name.substr(prefixLen, name.length()-prefixLen+1-strlen(".csv")); // .*.
		size_t match = 1;
		if (name.length()>2) { // more than just "."
			size_t pos = name.rfind(".SW"); // check for ".SWxxxx."
			if (pos!=string::npos && name.find(".", pos+1)==pos+7) {
				if (name.substr(pos+3, 4)==sw)
					match += 6;
				else {
					continue; // SW mismatch
				}
			}
			pos = name.rfind(".HW"); // check for ".HWxxxx."
			if (pos!=string::npos && name.find(".", pos+1)==pos+7) {
				if (name.substr(pos+3, 4)==hw)
					match += 6;
				else {
					continue; // HW mismatch
				}
			}
			pos = name.find(".", 1); // check for ".C[C[C[C[C]]]]."
			if (ident.length()>0 && pos!=string::npos && pos>1 && pos<=6) { // up to 5 chars between two "."s, immediately after "ZZ."
				string check = name.substr(1, pos-1);
				string remain = ident;
				bool matches = false;
				while (remain.length()>0 && remain.length()>=check.length()) {
					if (check==remain) {
						matches = true;
						break;
					}
					if (remain[remain.length()-1]!='0')
						break;
					remain.erase(remain.length()-1);
				}
				if (matches)
					match += remain.length();
				else {
					continue; // IDENT mismatch
				}
			}
		}
		if (match>=bestMatch) {
			bestMatch = match;
			best = *it;
		}
	}

	if (best.length()==0)
		return RESULT_ERR_NOTFOUND;

	// found the right file. load the templates if necessary, then load the file itself
	bool readCommon = false;
	DataFieldTemplates* templates = getTemplates(path, ".csv", hasTemplates, false, &readCommon);
	if (readCommon) {
		result = collectConfigFiles(path, "", ".csv", files);
		if (result==RESULT_OK && !files.empty()) {
			for (vector<string>::iterator it = files.begin(); it!=files.end(); it++) {
				string name = *it;
				name = name.substr(path.length()+1, name.length()-path.length()-strlen(".csv")); // *.
				if (name=="_templates.") // skip templates
					continue;
				if (name.length()<3 || name.find_first_of('.')!=2) { // different from the scheme "ZZ."
					name = *it;
					result = messages->readFromFile(name, templates);
					if (result==RESULT_OK)
						logNotice(lf_main, "read common config file %s for scan %s", name.c_str(), ident.c_str());
					else
						logError(lf_main, "error reading common config file %s for scan %s: %s", name.c_str(), ident.c_str(), getResultCode(result));
				}
			}
		}
	}
	result = messages->readFromFile(best, templates);
	if (result!=RESULT_OK) {
		logError(lf_main, "error reading config file %s for scan %s: %s", best.c_str(), ident.c_str(), getResultCode(result));
		return result;
	}
	logNotice(lf_main, "read config file %s for scan %s", best.c_str(), ident.c_str());
	result = messages->resolveConditions(false);
	if (result != RESULT_OK)
		logError(lf_main, "error resolving conditions: %s, %s", getResultCode(result), messages->getLastError().c_str());

	logNotice(lf_main, "found messages: %d (%d conditional on %d conditions, %d poll, %d update)", messages->size(), messages->sizeConditional(), messages->sizeConditions(), messages->sizePoll(), messages->sizePassive());
	relativeFile = best.substr(strlen(opt.configPath)+1);
	return RESULT_OK;
}
Example #11
0
    // throws DBException
    void buildAnIndex( Collection* collection,
                       IndexCatalogEntry* btreeState,
                       bool mayInterrupt ) {

        string ns = collection->ns().ns(); // our copy
        const IndexDescriptor* idx = btreeState->descriptor();
        const BSONObj& idxInfo = idx->infoObj();

        MONGO_TLOG(0) << "build index on: " << ns
                      << " properties: " << idx->toString() << endl;
        audit::logCreateIndex( currentClient.get(), &idxInfo, idx->indexName(), ns );

        Timer t;

        verify( Lock::isWriteLocked( ns ) );
        // this is so that people know there are more keys to look at when doing
        // things like in place updates, etc...
        collection->infoCache()->addedIndex();

        if ( collection->numRecords() == 0 ) {
            Status status = btreeState->accessMethod()->initializeAsEmpty();
            massert( 17343,
                     str::stream() << "IndexAccessMethod::initializeAsEmpty failed" << status.toString(),
                     status.isOK() );
            MONGO_TLOG(0) << "\t added index to empty collection";
            return;
        }

        scoped_ptr<BackgroundOperation> backgroundOperation;
        bool doInBackground = false;

        if ( idxInfo["background"].trueValue() && !inDBRepair ) {
            doInBackground = true;
            backgroundOperation.reset( new BackgroundOperation(ns) );
            uassert( 13130,
                     "can't start bg index b/c in recursive lock (db.eval?)",
                     !Lock::nested() );
            log() << "\t building index in background";
        }

        Status status = btreeState->accessMethod()->initializeAsEmpty();
        massert( 17342,
                 str::stream()
                 << "IndexAccessMethod::initializeAsEmpty failed"
                 << status.toString(),
                 status.isOK() );

        IndexAccessMethod* bulk = doInBackground ? NULL : btreeState->accessMethod()->initiateBulk();
        IndexAccessMethod* iam = bulk ? bulk : btreeState->accessMethod();

        if ( bulk )
            log() << "\t building index using bulk method";

        unsigned long long n = addExistingToIndex( collection,
                                                   btreeState->descriptor(),
                                                   iam,
                                                   doInBackground );

        if ( bulk ) {
            LOG(1) << "\t bulk commit starting";
            std::set<DiskLoc> dupsToDrop;

            Status status = btreeState->accessMethod()->commitBulk( bulk,
                                                                    mayInterrupt,
                                                                    &dupsToDrop );
            massert( 17398,
                     str::stream() << "commitBulk failed: " << status.toString(),
                     status.isOK() );

            if ( dupsToDrop.size() )
                log() << "\t bulk dropping " << dupsToDrop.size() << " dups";

            for( set<DiskLoc>::const_iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); ++i ) {
                BSONObj toDelete;
                collection->deleteDocument( *i,
                                            false /* cappedOk */,
                                            true /* noWarn */,
                                            &toDelete );
                if ( isMaster( ns.c_str() ) ) {
                    logOp( "d", ns.c_str(), toDelete );
                }
                
                getDur().commitIfNeeded();

                RARELY if ( mayInterrupt ) {
                    killCurrentOp.checkForInterrupt();
                }
            }
        }

        verify( !btreeState->head().isNull() );
        MONGO_TLOG(0) << "build index done.  scanned " << n << " total records. "
                      << t.millis() / 1000.0 << " secs" << endl;

        // this one is so people know that the index is finished
        collection->infoCache()->addedIndex();
    }
Example #12
0
bool pRPL::Process::
grouping(int nGroups,
         bool incldMaster,
         Process *pGrpedPrc,
         Process *pGrpMaster) const {
  if(!initialized()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: Process has NOT been initialized," \
         << " unable to be grouped" << endl;
    return false;
  }

  if(!active()) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: inactive Process," \
         << " unable to group a Null communicator." \
         << " id = " << _id << " nTotPrcs = " << _nTotalPrcs << endl;
    return false;
  }

  if(nGroups <= 0 ||
     nGroups > _nTotalPrcs) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error: invalid number of groups (" \
         << nGroups << ") as the total number of processes is " \
         << _nTotalPrcs << endl;
    return false;
  }

  if(!incldMaster && _nTotalPrcs <= 1) {
    cerr << __FILE__ << " " << __FUNCTION__ \
         << " Error:  " << _nTotalPrcs << " processes can NOT" \
         << " be grouped without the master process" << endl;
    return false;
  }

  MPI_Group glbGrp;
  MPI_Comm glbComm = _comm;
  MPI_Comm_group(glbComm, &glbGrp);
  int myID = -1;
  int grpID = -1;
  MPI_Comm grpComm = MPI_COMM_NULL;

  if(incldMaster) {
    myID = _id;
    grpID = myID % nGroups;
    MPI_Comm_split(glbComm, grpID, myID, &grpComm);
    if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
      return false;
    }
    if(pGrpMaster != NULL) {
      MPI_Group masterGrp= MPI_GROUP_NULL;
      MPI_Comm masterComm = MPI_COMM_NULL;
      int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
      MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
      MPI_Comm_create(glbComm, masterGrp, &masterComm);
      if(!pGrpMaster->set(masterComm)) {
        return false;
      }
    }
  }
  else {
    int excldRanks[1] = {0};
    MPI_Group glbGrp2 = MPI_GROUP_NULL;
    MPI_Group_excl(glbGrp, 1, excldRanks, &glbGrp2);
    MPI_Comm_create(_comm, glbGrp2, &glbComm);
    glbGrp = glbGrp2;
    if(!isMaster()) {
      MPI_Comm_rank(glbComm, &myID);
      grpID = myID % nGroups;
      MPI_Comm_split(glbComm, grpID, myID, &grpComm);
      if(!pGrpedPrc->set(grpComm, _hasWriter, grpID)) {
        return false;
      }
      if(pGrpMaster != NULL) {
        MPI_Group masterGrp= MPI_GROUP_NULL;
        MPI_Comm masterComm = MPI_COMM_NULL;
        int grpMasterRange[1][3] = {{0, nGroups-1, 1}};
        MPI_Group_range_incl(glbGrp, 1, grpMasterRange, &masterGrp);
        MPI_Comm_create(glbComm, masterGrp, &masterComm);
        if(!pGrpMaster->set(masterComm)) {
          return false;
        }
      }
    }
  }

  return true;
}
Example #13
0
void HWMapContainer::showSeedPrompt()
{
    SeedPrompt prompt(parentWidget()->parentWidget(), getCurrentSeed(), isMaster());
    connect(&prompt, SIGNAL(seedSelected(const QString &)), this, SLOT(setNewSeed(const QString &)));
    prompt.exec();
}
Example #14
0
bool Score::read(XmlReader& e)
      {
      while (e.readNextStartElement()) {
            e.setTrack(-1);
            const QStringRef& tag(e.name());
            if (tag == "Staff")
                  readStaff(e);
            else if (tag == "Omr") {
#ifdef OMR
                  masterScore()->setOmr(new Omr(this));
                  masterScore()->omr()->read(e);
#else
                  e.skipCurrentElement();
#endif
                  }
            else if (tag == "Audio") {
                  _audio = new Audio;
                  _audio->read(e);
                  }
            else if (tag == "showOmr")
                  masterScore()->setShowOmr(e.readInt());
            else if (tag == "playMode")
                  _playMode = PlayMode(e.readInt());
            else if (tag == "LayerTag") {
                  int id = e.intAttribute("id");
                  const QString& t = e.attribute("tag");
                  QString val(e.readElementText());
                  if (id >= 0 && id < 32) {
                        _layerTags[id] = t;
                        _layerTagComments[id] = val;
                        }
                  }
            else if (tag == "Layer") {
                  Layer layer;
                  layer.name = e.attribute("name");
                  layer.tags = e.attribute("mask").toUInt();
                  _layer.append(layer);
                  e.readNext();
                  }
            else if (tag == "currentLayer")
                  _currentLayer = e.readInt();
            else if (tag == "Synthesizer")
                  _synthesizerState.read(e);
            else if (tag == "page-offset")
                  _pageNumberOffset = e.readInt();
            else if (tag == "Division")
                  _fileDivision = e.readInt();
            else if (tag == "showInvisible")
                  _showInvisible = e.readInt();
            else if (tag == "showUnprintable")
                  _showUnprintable = e.readInt();
            else if (tag == "showFrames")
                  _showFrames = e.readInt();
            else if (tag == "showMargins")
                  _showPageborders = e.readInt();
            else if (tag == "Style") {
                  qreal sp = style().value(Sid::spatium).toDouble();
                  style().load(e);
                  // if (_layoutMode == LayoutMode::FLOAT || _layoutMode == LayoutMode::SYSTEM) {
                  if (_layoutMode == LayoutMode::FLOAT) {
                        // style should not change spatium in
                        // float mode
                        style().set(Sid::spatium, sp);
                        }
                  _scoreFont = ScoreFont::fontFactory(style().value(Sid::MusicalSymbolFont).toString());
                  }
            else if (tag == "copyright" || tag == "rights") {
                  Text* text = new Text(this);
                  text->read(e);
                  setMetaTag("copyright", text->xmlText());
                  delete text;
                  }
            else if (tag == "movement-number")
                  setMetaTag("movementNumber", e.readElementText());
            else if (tag == "movement-title")
                  setMetaTag("movementTitle", e.readElementText());
            else if (tag == "work-number")
                  setMetaTag("workNumber", e.readElementText());
            else if (tag == "work-title")
                  setMetaTag("workTitle", e.readElementText());
            else if (tag == "source")
                  setMetaTag("source", e.readElementText());
            else if (tag == "metaTag") {
                  QString name = e.attribute("name");
                  setMetaTag(name, e.readElementText());
                  }
            else if (tag == "Part") {
                  Part* part = new Part(this);
                  part->read(e);
                  _parts.push_back(part);
                  }
            else if ((tag == "HairPin")
                || (tag == "Ottava")
                || (tag == "TextLine")
                || (tag == "Volta")
                || (tag == "Trill")
                || (tag == "Slur")
                || (tag == "Pedal")) {
                  Spanner* s = toSpanner(Element::name2Element(tag, this));
                  s->read(e);
                  addSpanner(s);
                  }
            else if (tag == "Excerpt") {
                  if (MScore::noExcerpts)
                        e.skipCurrentElement();
                  else {
                        if (isMaster()) {
                              Excerpt* ex = new Excerpt(static_cast<MasterScore*>(this));
                              ex->read(e);
                              excerpts().append(ex);
                              }
                        else {
                              qDebug("Score::read(): part cannot have parts");
                              e.skipCurrentElement();
                              }
                        }
                  }
            else if (e.name() == "Tracklist") {
                  int strack = e.intAttribute("sTrack",   -1);
                  int dtrack = e.intAttribute("dstTrack", -1);
                  if (strack != -1 && dtrack != -1)
                        e.tracks().insert(strack, dtrack);
                  e.skipCurrentElement();
                  }
            else if (tag == "Score") {          // recursion
                  if (MScore::noExcerpts)
                        e.skipCurrentElement();
                  else {
                        e.tracks().clear();     // ???
                        MasterScore* m = masterScore();
                        Score* s       = new Score(m, MScore::baseStyle());
                        Excerpt* ex    = new Excerpt(m);

                        ex->setPartScore(s);
                        e.setLastMeasure(nullptr);
                        s->read(e);
                        ex->setTracks(e.tracks());
                        m->addExcerpt(ex);
                        }
                  }
            else if (tag == "name") {
                  QString n = e.readElementText();
                  if (!isMaster()) //ignore the name if it's not a child score
                        excerpt()->setTitle(n);
                  }
            else if (tag == "layoutMode") {
                  QString s = e.readElementText();
                  if (s == "line")
                        _layoutMode = LayoutMode::LINE;
                  else if (s == "system")
                        _layoutMode = LayoutMode::SYSTEM;
                  else
                        qDebug("layoutMode: %s", qPrintable(s));
                  }
            else
                  e.unknown();
            }
      e.reconnectBrokenConnectors();
      if (e.error() != QXmlStreamReader::NoError) {
            qDebug("%s: xml read error at line %lld col %lld: %s",
               qPrintable(e.getDocName()), e.lineNumber(), e.columnNumber(),
               e.name().toUtf8().data());
            MScore::lastError = QObject::tr("XML read error at line %1, column %2: %3").arg(e.lineNumber()).arg(e.columnNumber()).arg(e.name().toString());
            return false;
            }

      connectTies();

      _fileDivision = MScore::division;

#if 0 // TODO:barline
      //
      //    sanity check for barLineSpan
      //
      for (Staff* st : staves()) {
            int barLineSpan = st->barLineSpan();
            int idx = st->idx();
            int n   = nstaves();
            if (idx + barLineSpan > n) {
                  qDebug("bad span: idx %d  span %d staves %d", idx, barLineSpan, n);
                  // span until last staff
                  barLineSpan = n - idx;
                  st->setBarLineSpan(barLineSpan);
                  }
            else if (idx == 0 && barLineSpan == 0) {
                  qDebug("bad span: idx %d  span %d staves %d", idx, barLineSpan, n);
                  // span from the first staff until the start of the next span
                  barLineSpan = 1;
                  for (int i = 1; i < n; ++i) {
                        if (staff(i)->barLineSpan() == 0)
                              ++barLineSpan;
                        else
                              break;
                        }
                  st->setBarLineSpan(barLineSpan);
                  }
            // check spanFrom
            int minBarLineFrom = st->lines(0) == 1 ? BARLINE_SPAN_1LINESTAFF_FROM : MIN_BARLINE_SPAN_FROMTO;
            if (st->barLineFrom() < minBarLineFrom)
                  st->setBarLineFrom(minBarLineFrom);
            if (st->barLineFrom() > st->lines(0) * 2)
                  st->setBarLineFrom(st->lines(0) * 2);
            // check spanTo
            Staff* stTo = st->barLineSpan() <= 1 ? st : staff(idx + st->barLineSpan() - 1);
            // 1-line staves have special bar line spans
            int maxBarLineTo        = stTo->lines(0) == 1 ? BARLINE_SPAN_1LINESTAFF_TO : stTo->lines(0) * 2;
            if (st->barLineTo() < MIN_BARLINE_SPAN_FROMTO)
                  st->setBarLineTo(MIN_BARLINE_SPAN_FROMTO);
            if (st->barLineTo() > maxBarLineTo)
                  st->setBarLineTo(maxBarLineTo);
            // on single staff span, check spanFrom and spanTo are distant enough
            if (st->barLineSpan() == 1) {
                  if (st->barLineTo() - st->barLineFrom() < MIN_BARLINE_FROMTO_DIST) {
                        st->setBarLineFrom(0);
                        st->setBarLineTo(0);
                        }
                  }
            }
#endif

      if (!masterScore()->omr())
            masterScore()->setShowOmr(false);

      fixTicks();
      masterScore()->rebuildMidiMapping();
      masterScore()->updateChannel();
//      createPlayEvents();
      return true;
      }
Example #15
0
bool ExpandableBlockStreamExchangeEpoll::open(const PartitionOffset& partition_offset)
{
	unsigned long long int start = curtick();

	RegisterExpandedThreadToAllBarriers();

	if (tryEntryIntoSerializedSection())
	{
		debug_winner_thread++;


		nexhausted_lowers=0;
		this->partition_offset=partition_offset;
		nlowers=state.lower_id_list_.size();

		for (unsigned i = 0; i < nlowers; i++)
		{
			debug_received_block[i] = 0;
		}

		socket_fd_lower_list = new int[nlowers];
		//init -1 ---Yu
		for (int i = 0; i < nlowers; ++i) {
			socket_fd_lower_list[i] = -1;
		}
		buffer=new BlockStreamBuffer(state.block_size_,BUFFER_SIZE_IN_EXCHANGE,state.schema_);
		ExpanderTracker::getInstance()->addNewStageEndpoint(pthread_self(),LocalStageEndPoint(stage_src,"Exchange",buffer));
		received_block_stream_=BlockStreamBase::createBlock(state.schema_,state.block_size_);

		block_for_socket_ = new BlockContainer*[nlowers];
		for (unsigned i = 0; i < nlowers; i++)
		{
			block_for_socket_[i] = new BlockContainer(received_block_stream_->getSerializedBlockSize());
		}

		if (PrepareTheSocket() == false)
			return false;

		if (SetSocketNonBlocking(sock_fd) == false)
		{
			return false;
		}

		logging_->log("[%ld,%d] Open: nexhausted lowers=%d, nlower=%d", state.exchange_id_, partition_offset, nexhausted_lowers, nlowers);

		if (RegisterExchange() == false)
		{
			logging_->elog("Register Exchange with ID=%d fails!", state.exchange_id_);
		}

		if(isMaster()){
			/*  According to a bug reported by dsc, the master exchange upper should check whether the other
			 *  uppers have registered with the exchangeTracker. Otherwise, a lower may fail to connect to the
			 *  exchangeTracker of some uppers when the lower nodes receive the exchange lower, as some uppers
			 *  have not yet registered the exchange_id with the exchangeTracker.
			 */
			logging_->log("[%ld,%d] Synchronizing....", state.exchange_id_, partition_offset);
			checkOtherUpperRegistered();
			logging_->log("[%ld,%d] Synchronized!", state.exchange_id_, partition_offset);
			logging_->log("[%ld,%d] This exchange is the master one, serialize the iterator subtree to the children...", state.exchange_id_, partition_offset);

			if (SerializeAndSendToMulti() == false)
				return false;
		}

		if (CreateReceiverThread() == false)
		{
			return false;
		}

		createPerformanceInfo();

	}

	/* A synchronization barrier, in case of multiple expanded threads*/
	barrierArrive();
	return true;
}
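Compared with the Materialized variant at the top of this section, which busy-waits on open_finished_, this version lets the losing expanded threads fall straight through and meet the winner at barrierArrive(). A standalone sketch of that shape, with std::atomic and a C++20 std::barrier standing in for the engine's own serialized-section and barrier helpers (the class and function names below are placeholders, not the original API), could look like this:

#include <atomic>
#include <barrier>
#include <cstddef>

class BarrierOpen {
public:
	explicit BarrierOpen(std::ptrdiff_t nthreads) : barrier_(nthreads) {}

	bool open() {
		if (!claimed_.exchange(true))             // exactly one expanded thread runs the setup
			setup_ok_ = prepareSocketsAndBuffers();
		barrier_.arrive_and_wait();               // all expanded threads leave open() together
		return setup_ok_;
	}

private:
	bool prepareSocketsAndBuffers() { return true; }  // placeholder for the real work
	std::atomic<bool> claimed_{false};
	bool setup_ok_ = true;                        // written before the barrier, read after it
	std::barrier<> barrier_;
};

The sketch deliberately records a failed setup and still reaches the barrier; in the original, the winning thread returns false before barrierArrive() when, for example, PrepareTheSocket() fails, which would leave the other expanded threads blocked at the barrier.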
Example #16
0
void MasterSlaveCommunicator::releaseSlaves()
{
    if (_performing) throw FATALERROR("Still performing tasks");
    if (isMultiProc() && _acquired && isMaster()) stop_obeying();
    _acquired = false;
}
Example #17
0
    // throws DBException
    unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
        CurOp * op = cc().curop();

        Timer t;

        tlog(1) << "fastBuildIndex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;

        bool dupsAllowed = !idx.unique();
        bool dropDups = idx.dropDups() || inDBRepair;
        BSONObj order = idx.keyPattern();

        getDur().writingDiskLoc(idx.head).Null();

        if ( logLevel > 1 ) printMemInfo( "before index start" );

        /* get and sort all the keys ----- */
        ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
        SortPhaseOne _ours;
        SortPhaseOne *phase1 = precalced;
        if( phase1 == 0 ) {
            phase1 = &_ours;
            SortPhaseOne& p1 = *phase1;
            shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
            p1.sorter.reset( new BSONObjExternalSorter(idx.idxInterface(), order) );
            p1.sorter->hintNumObjects( d->stats.nrecords );
            const IndexSpec& spec = idx.getSpec();
            while ( c->ok() ) {
                BSONObj o = c->current();
                DiskLoc loc = c->currLoc();
                p1.addKeys(spec, o, loc);
                c->advance();
                pm.hit();
                if ( logLevel > 1 && p1.n % 10000 == 0 ) {
                    printMemInfo( "\t iterating objects" );
                }
            };
        }
        pm.finished();

        BSONObjExternalSorter& sorter = *(phase1->sorter);

        if( phase1->multi )
            d->setIndexIsMultikey(ns, idxNo);

        if ( logLevel > 1 ) printMemInfo( "before final sort" );
        phase1->sorter->sort();
        if ( logLevel > 1 ) printMemInfo( "after final sort" );

        log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;

        set<DiskLoc> dupsToDrop;

        /* build index --- */
        if( idx.version() == 0 )
            buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
        else if( idx.version() == 1 ) 
            buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
        else
            verify(false);

        if( dropDups ) 
            log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;

        for( set<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ ){
            theDataFileMgr.deleteRecord( ns, i->rec(), *i, false /* cappedOk */ , true /* noWarn */ , isMaster( ns ) /* logOp */ );
            getDur().commitIfNeeded();
        }

        return phase1->n;
    }
Example #18
0
int main_controller_tick(int state){
    static unsigned short main_hold_time = 0;
    switch(state){//main_task transitions/actions
        case st_main_start:
            state = st_main_init;
            break;
        case st_main_init:
            SPI_Master_Init();
            PCD_Init();
            state = st_main_wait_new;
            QueueEnqueue(lcd_command_queue, lcd_write_ready);
            break;
        case st_main_wait_new:
            if(PICC_IsNewCardPresent()){
                storeTagID();
                PICC_HaltA();
                state = st_main_checkID;
            } else {
                state = st_main_wait_new;
            }
            break;
        case st_main_checkID:
            if(isMaster(tag_id)){
                state = st_main_master;
                QueueEnqueue(lcd_command_queue, lcd_write_master);
            } else if(search_valid_tag(tag_id) != -1){
                state = st_main_hold;
                main_hold_time = 0;
                if(current_lock_position == locked || current_lock_position == malfunction){
                    QueueEnqueue(lock_command_queue, lc_unlock);
                } else if(current_lock_position == unlocked){
                    QueueEnqueue(lock_command_queue, lc_lock);
                }
            } else {
                state = st_main_hold;
                main_hold_time = 0;
                QueueEnqueue(lcd_command_queue, lcd_write_invalid);
            }
            break;
        case st_main_master:
            if(!(PIND & 0x10)){
                if(isTagDBFull()){
                    state = st_main_hold;
                    main_hold_time = 0;
                    QueueEnqueue(lcd_command_queue, lcd_write_add_full);
                } else {
                    state = st_main_add_pressed;
                    QueueEnqueue(lcd_command_queue, lcd_write_add);
                }                    
            } else if(!(PIND & 0x40)){
                if(isTagDBEmpty()){
                    state = st_main_hold;
                    main_hold_time = 0;
                    QueueEnqueue(lcd_command_queue, lcd_write_remove_empty);
                } else {
                    state = st_main_rm_pressed;
                    current_remove_option = 0;
                    QueueEnqueue(lcd_command_queue, lcd_write_remove);
                }                    
            } else {
                state = st_main_master;
            }
            break;
        case st_main_add_pressed:
            if(PIND & 0x10){
                state = st_main_add_tag;
            } else {
                state = st_main_add_pressed;
            }
            break;
        case st_main_add_tag:
            if(PICC_IsNewCardPresent()){
                storeTagID();
                PICC_HaltA();
                if(isMaster(tag_id)){
                    state = st_main_add_tag;
                } else if(search_valid_tag(tag_id) == -1){
                    state = st_main_hold;
                    main_hold_time = 0;
                    add_valid_tag(tag_id);
                    QueueEnqueue(lcd_command_queue, lcd_write_added);
                } else {
                    state = st_main_hold;
                    main_hold_time = 0;
                    QueueEnqueue(lcd_command_queue, lcd_write_add_exist);
                }
            } else {
                state = st_main_add_tag;
            }
            break;
        case st_main_rm_pressed:
            if(PIND & 0x40){
                state = st_main_rm_tag;
            } else {
                state = st_main_rm_pressed;
            }
            break;
        case st_main_rm_tag:
            if(!(PIND & 0x10)){
                current_remove_option = (current_remove_option + (numValidKeys - 1)) % numValidKeys;
                QueueEnqueue(lcd_command_queue, lcd_write_remove);
                state = st_main_lr_pressed;
            } else if(!(PIND & 0x40)){
                current_remove_option = (current_remove_option + 1) % numValidKeys;
                QueueEnqueue(lcd_command_queue, lcd_write_remove);
                state = st_main_lr_pressed;
            } else if(!(PIND & 0x20)){
                remove_valid_tag(current_remove_option);
                QueueEnqueue(lcd_command_queue, lcd_write_removed);
                state = st_main_hold;
                main_hold_time = 0;
            } else {
                state = st_main_rm_tag;
            }
            break;
        case st_main_lr_pressed:
            if( (PIND & 0x10) && (PIND & 0x40) ){
                state = st_main_rm_tag;
            } else {
                state = st_main_lr_pressed;
            }
            break;
        case st_main_hold:
            if(main_hold_time < 20){
                main_hold_time++;
                state = st_main_hold;
            } else {
                state = st_main_wait_new;
            }
            break;
        default:
            state = st_main_start;
            break;
    }//end main_task transitions/actions
    return state;
}
Example #19
0
/**
 * The program argument parsing function.
 * @param key the key from @a argpoptions.
 * @param arg the option argument, or NULL.
 * @param state the parsing state.
 */
error_t parse_opt(int key, char *arg, struct argp_state *state)
{
	struct options *opt = (struct options*)state->input;
	result_t result = RESULT_OK;
	switch (key) {

	// Device options:
	case 'd': // --device=/dev/ttyUSB0
		if (arg == NULL || arg[0] == 0) {
			argp_error(state, "invalid device");
			return EINVAL;
		}
		opt->device = arg;
		break;
	case 'n': // --nodevicecheck
		opt->noDeviceCheck = true;
		break;
	case 'r': // --readonly
		opt->readonly = true;
		if (opt->scanConfig || opt->answer || opt->generateSyn) {
			argp_error(state, "cannot combine readonly with scanconfig/answer/generatesyn");
			return EINVAL;
		}
		break;

	// Message configuration options:
	case 'c': // --configpath=/etc/ebusd
		if (arg == NULL || arg[0] == 0 || strcmp("/", arg) == 0) {
			argp_error(state, "invalid configpath");
			return EINVAL;
		}
		opt->configPath = arg;
		break;
	case 's': // --scanconfig
		opt->scanConfig = true;
		if (opt->readonly) {
			argp_error(state, "cannot combine readonly with scanconfig/answer/generatesyn");
			return EINVAL;
		}
		break;
	case O_CHKCFG: // --checkconfig
		if (opt->checkConfig==0)
			opt->checkConfig = 1;
		break;
	case O_DMPCFG: // --dumpconfig
		opt->checkConfig = 2;
		break;
	case O_POLINT: // --pollinterval=5
		opt->pollInterval = parseInt(arg, 10, 0, 3600, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid pollinterval");
			return EINVAL;
		}
		break;

	// eBUS options:
	case 'a': // --address=FF
		opt->address = (unsigned char)parseInt(arg, 16, 0, 0xff, result);
		if (result != RESULT_OK || !isMaster(opt->address)) {
			argp_error(state, "invalid address");
			return EINVAL;
		}
		break;
	case O_ANSWER: // --answer
		opt->answer = true;
		if (opt->readonly) {
			argp_error(state, "cannot combine readonly with scanconfig/answer/generatesyn");
			return EINVAL;
		}
		break;
	case O_ACQTIM: // --acquiretimeout=9400
		opt->acquireTimeout = parseInt(arg, 10, 1000, 100000, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid acquiretimeout");
			return EINVAL;
		}
		break;
	case O_ACQRET: // --acquireretries=2
		opt->acquireRetries = parseInt(arg, 10, 0, 10, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid acquireretries");
			return EINVAL;
		}
		break;
	case O_SNDRET: // --sendretries=2
		opt->sendRetries = parseInt(arg, 10, 0, 10, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid sendretries");
			return EINVAL;
		}
		break;
	case O_RCVTIM: // --receivetimeout=15000
		opt->receiveTimeout = parseInt(arg, 10, 1000, 100000, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid receivetimeout");
			return EINVAL;
		}
		break;
	case O_MASCNT: // --numbermasters=0
		opt->masterCount = parseInt(arg, 10, 0, 25, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid numbermasters");
			return EINVAL;
		}
		break;
	case O_GENSYN: // --generatesyn
		opt->generateSyn = true;
		if (opt->readonly) {
			argp_error(state, "cannot combine readonly with scanconfig/answer/generatesyn");
			return EINVAL;
		}
		break;

	// Daemon options:
	case 'f': // --foreground
		opt->foreground = true;
		break;
	case 'p': // --port=8888
		opt->port = (uint16_t)parseInt(arg, 10, 1, 65535, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid port");
			return EINVAL;
		}
		break;
	case O_LOCAL: // --localhost
		opt->localOnly = true;
		break;
	case O_HTTPPT: // --httpport
		opt->httpPort = (uint16_t)parseInt(arg, 10, 1, 65535, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid port");
			return EINVAL;
		}
		break;
	case O_HTMLPA: // --htmlpath=/var/ebusd/html
		if (arg == NULL || arg[0] == 0 || strcmp("/", arg) == 0) {
			argp_error(state, "invalid htmlpath");
			return EINVAL;
		}
		opt->htmlPath = arg;
		break;

	// Log options:
	case 'l': // --logfile=/var/log/ebusd.log
		if (arg == NULL || arg[0] == 0 || strcmp("/", arg) == 0) {
			argp_error(state, "invalid logfile");
			return EINVAL;
		}
		opt->logFile = arg;
		break;
	case O_LOGARE: // --logareas=all
		if (!setLogFacilities(arg)) {
			argp_error(state, "invalid logareas");
			return EINVAL;
		}
		break;
	case O_LOGLEV: // --loglevel=event
		if (!setLogLevel(arg)) {
			argp_error(state, "invalid loglevel");
			return EINVAL;
		}
		break;
	case O_LOGRAW:  // --lograwdata
		opt->logRaw = true;
		break;

	// Dump options:
	case 'D':  // --dump
		opt->dump = true;
		break;
	case O_DMPFIL: // --dumpfile=/tmp/ebus_dump.bin
		if (arg == NULL || arg[0] == 0 || strcmp("/", arg) == 0) {
			argp_error(state, "invalid dumpfile");
			return EINVAL;
		}
		opt->dumpFile = arg;
		break;
	case O_DMPSIZ: // --dumpsize=100
		opt->dumpSize = parseInt(arg, 10, 1, 1000000, result);
		if (result != RESULT_OK) {
			argp_error(state, "invalid dumpsize");
			return EINVAL;
		}
		break;

	default:
		return ARGP_ERR_UNKNOWN;
	}
	return 0;
}