Code example #1
File: test_vec.cpp  Project: kwabe007/spel-spelet
int main()
{
    // A few things that should work:
    UIntVector a(7);           // initialization with 7 elements
    UIntVector b(a);           // copy constructor
    UIntVector c = a;          // copy constructor
    UIntVector d(6);


    a = b;                 // assignment by copying
    a[5] = 7;              // assignment to an element

    const UIntVector e(10);    // constant object with 10 elements
    int i = e[5];          // const int operator[](int) const is invoked
    i = a[0];              // the vector is zero-indexed
    i = a[5];              // int operator[](int) is invoked

    a[5]++;                // increase the value to 8

    try {
        i = e[10];             // tries to access an element outside e
    } catch (const std::out_of_range& err) {
        std::cout << err.what() << std::endl;
    }

    // Various things to test
    e[5] = 3;              // error: (does not compile) assignment to const
    b = b;                 // hmm: make sure the memory that should be kept is not freed

    return 0;
}
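The UIntVector class itself is not shown above. A minimal sketch of an interface that would satisfy those calls (hypothetical, not the project's actual implementation) could look like this:

#include <cstddef>
#include <stdexcept>

// Hypothetical minimal UIntVector - just enough to support the test above.
class UIntVector
{
public:
    explicit UIntVector(std::size_t size)
        : size_(size), data_(new unsigned int[size]()) {}   // zero-initialized storage

    UIntVector(const UIntVector& other)                      // copy constructor
        : size_(other.size_), data_(new unsigned int[other.size_])
    {
        for (std::size_t i = 0; i < size_; ++i)
            data_[i] = other.data_[i];
    }

    UIntVector& operator=(const UIntVector& other)           // copy assignment
    {
        if (this != &other)                                  // keeps b = b safe
        {
            unsigned int* fresh = new unsigned int[other.size_];
            for (std::size_t i = 0; i < other.size_; ++i)
                fresh[i] = other.data_[i];
            delete[] data_;
            data_ = fresh;
            size_ = other.size_;
        }
        return *this;
    }

    ~UIntVector() { delete[] data_; }

    unsigned int& operator[](std::size_t i)                  // used by a[5] = 7 and a[5]++
    {
        if (i >= size_) throw std::out_of_range("UIntVector: index out of range");
        return data_[i];
    }

    unsigned int operator[](std::size_t i) const             // so e[5] = 3 does not compile
    {
        if (i >= size_) throw std::out_of_range("UIntVector: index out of range");
        return data_[i];
    }

private:
    std::size_t size_;
    unsigned int* data_;
};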
Code example #2
EStatusCode CFFEmbeddedFontWriter::WriteCharsets(const UIntVector& inSubsetGlyphIDs,
													UShortVector* inCIDMapping)
{
	// since this is a subset, the chances that I'll get a default charset are 0.
	// hence I'll always write a charset, using format 0.
	UIntVector::const_iterator it = inSubsetGlyphIDs.begin();
	++it; // skip the 0

	mCharsetPosition = mFontFileStream.GetCurrentPosition();

	mPrimitivesWriter.WriteCard8(0);
	if(mIsCID && inCIDMapping)
	{
		UShortVector::const_iterator itCIDs = inCIDMapping->begin();
		++itCIDs;
		for(; it != inSubsetGlyphIDs.end(); ++it,++itCIDs)
			mPrimitivesWriter.WriteSID(*itCIDs);

	}
	else
	{
		// note that this also works for CIDs! cause in this case the SIDs are actually
		// CIDs
		for(; it != inSubsetGlyphIDs.end(); ++it)
			mPrimitivesWriter.WriteSID(mOpenTypeInput.mCFF.GetGlyphSID(0,*it));
	}
	return mPrimitivesWriter.GetInternalState();
}
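For context, a format-0 CFF charset is just a Card8 format byte followed by one big-endian 16-bit SID (or CID, for CID-keyed fonts) per glyph, excluding GID 0. A standalone sketch of that byte layout, illustrative only and not part of the library:

#include <cstdint>
#include <vector>

// Illustrative only: serialize a CFF format-0 charset into raw bytes.
// 'sids' holds one SID (or CID for CID-keyed fonts) per glyph, excluding GID 0 (.notdef).
std::vector<uint8_t> SerializeCharsetFormat0(const std::vector<uint16_t>& sids)
{
	std::vector<uint8_t> out;
	out.push_back(0);                                      // Card8 format identifier: format 0
	for (uint16_t sid : sids)
	{
		out.push_back(static_cast<uint8_t>(sid >> 8));     // Card16 values are big-endian
		out.push_back(static_cast<uint8_t>(sid & 0xFF));
	}
	return out;
}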
Code example #3
static UIntVector GetOrderedKeys(const UIntAndGlyphEncodingInfoVector& inMap)
{
	UIntVector result;
	for(UIntAndGlyphEncodingInfoVector::const_iterator it = inMap.begin(); it != inMap.end(); ++it)
		result.push_back(it->first);
	sort(result.begin(),result.end());
	return result;
}
Code example #4
EStatusCode CFFDescendentFontWriter::WriteFont(	ObjectIDType inDecendentObjectID, 
														const std::string& inFontName,
														FreeTypeFaceWrapper& inFontInfo,
														const UIntAndGlyphEncodingInfoVector& inEncodedGlyphs,
														ObjectsContext* inObjectsContext,
														bool inEmbedFont)
{
	// reset embedded font object ID (and the flag for whether it was actually embedded,
	// which may be false due to font embedding restrictions)
	mEmbeddedFontFileObjectID = 0;

	// Logically speaking, I shouldn't be getting to CID writing for a Type 1 font.
	// At least, this is the current assumption, since I don't intend to support Type 1 CIDs,
	// just regular Type 1s. As such, fail if we got here for Type 1.
	const char* fontType = inFontInfo.GetTypeString();
	if(strcmp(scType1,fontType) == 0)
	{
		TRACE_LOG1("CFFDescendentFontWriter::WriteFont, Exception. identified type1 font when writing CFF CID font, font name - %s. type 1 CIDs are not supported.",inFontName.substr(0, MAX_TRACE_SIZE - 200).c_str());
		return PDFHummus::eFailure;
	}

	if (inEmbedFont)
	{
		CFFEmbeddedFontWriter embeddedFontWriter;
		UIntAndGlyphEncodingInfoVector encodedGlyphs = inEncodedGlyphs;
		UIntVector orderedGlyphs;
		UShortVector cidMapping;

		// Gal: the following sort completely ruins everything.
		// The order of the glyphs should be maintained per the ENCODED characters,
		// which is how the input is received. IMPORTANT - the order is critical
		// for the success of the embedding, as it determines the order of the glyphs
		// in the subset font and so their GIDs, which MUST match the encoded chars.
		//sort(encodedGlyphs.begin(), encodedGlyphs.end(), sEncodedGlypsSort);

		for (UIntAndGlyphEncodingInfoVector::const_iterator it = encodedGlyphs.begin();
			it != encodedGlyphs.end();
			++it)
		{
			orderedGlyphs.push_back(it->first);
			cidMapping.push_back(it->second.mEncodedCharacter);
		}
		EStatusCode status = embeddedFontWriter.WriteEmbeddedFont(inFontInfo,
			orderedGlyphs,
			scCIDFontType0C,
			inFontName,
			inObjectsContext,
			&cidMapping,
			mEmbeddedFontFileObjectID);
		if (status != PDFHummus::eSuccess)
			return status;
	}

	DescendentFontWriter descendentFontWriter;

	return descendentFontWriter.WriteFont(inDecendentObjectID,inFontName,inFontInfo,inEncodedGlyphs,inObjectsContext,this);
}
Code example #5
void Type1ToCFFEmbeddedFontWriter::TranslateFromFreeTypeToType1(FreeTypeFaceWrapper& inFontInfo,
																const UIntVector& inSubsetGlyphIDs,
																StringVector& outGlyphNames)
{
	UIntVector::const_iterator it = inSubsetGlyphIDs.begin();

	for(; it != inSubsetGlyphIDs.end(); ++it)
		outGlyphNames.push_back(inFontInfo.GetGlyphName(*it));
}
Code example #6
void init_fasta_enzyme_function ( const string& enzyme )
{
	static bool called = false;
	if ( called ) return;
	called = true;
	static DigestTable& digestTable = DigestTable::instance ();

	if ( enzyme == "No enzyme" ) {
		ErrorHandler::genError ()->error ( "No enzyme not valid option for this program." );
		return;
	}
	StringVector enzymeNameTable;
	cnbr_digest = false;
	StringSizeType start = 0;
	StringSizeType end = 0;
	for ( ; ; ) {
		end = enzyme.find_first_of ( "/", start );
		string enzymeName = enzyme.substr ( start, end-start );
		if ( enzymeName == "CNBr" ) cnbr_digest = true;
		enzymeNameTable.push_back ( enzymeName );
		if ( end == string::npos ) break;
		start = end + 1;
	}
	numDigests = enzymeNameTable.size ();

	for ( int i = 0 ; i < numDigests ; i++ ) {
		string br_aas = digestTable.getBreakMask (enzymeNameTable[i]);
		break_aas_array.push_back ( br_aas );
		break_mask_array.push_back ( string_to_mask ( br_aas ) );

		string exclude_aas = digestTable.getExcludeMask (enzymeNameTable[i]);
		exclude_aas_array.push_back ( exclude_aas );
		if ( exclude_aas != "-" ) exclude_mask_array.push_back ( string_to_mask ( exclude_aas ) );
		else exclude_mask_array.push_back ( 0 );

		digestSpecificityArray.push_back ( digestTable.getSpecificity (enzymeNameTable[i]) );
	}	
	if ( numDigests == 1 ) {
		break_mask = break_mask_array [0];
		break_aas = break_aas_array [0];
		exclude_mask = exclude_mask_array [0];
		digestSpecificity = digestSpecificityArray [0];
		if ( digestSpecificity == 'C' ) enzyme_fragmenter = calc_fasta_c_term_fragments;
		else enzyme_fragmenter = calc_fasta_n_term_fragments;
	}
	else {
		int i;
		for ( i = 1 ; i < numDigests ; i++ ) {
			if ( digestSpecificityArray [i] != digestSpecificityArray [0] ) break;
		}
		if ( i == numDigests ) digestSpecificity = digestSpecificityArray [0];	/* All digest specificities must be the same for digestSpecificity to be set */

		enzyme_fragmenter = calc_fasta_multi_digest_fragments;
	}
}
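The enzyme argument is a '/'-separated list of names from the DigestTable, as the parsing loop above shows. A self-contained sketch of just that splitting logic (the enzyme names here are made up for illustration; in the real code each name must exist in the DigestTable):

#include <iostream>
#include <string>
#include <vector>

// Illustrative only: the '/'-splitting loop from above, extracted so the input format can be seen in isolation.
int main ()
{
	std::string enzyme = "Trypsin/CNBr";                 // hypothetical enzyme list
	std::vector<std::string> names;
	bool cnbr = false;
	std::string::size_type start = 0, end = 0;
	for ( ; ; ) {
		end = enzyme.find_first_of ( "/", start );
		std::string name = enzyme.substr ( start, end-start );
		if ( name == "CNBr" ) cnbr = true;
		names.push_back ( name );
		if ( end == std::string::npos ) break;
		start = end + 1;
	}
	std::cout << names.size () << " digests, CNBr: " << cnbr << std::endl;   // prints "2 digests, CNBr: 1"
	return 0;
}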
Code example #7
EStatusCode CFFEmbeddedFontWriter::WriteCharStrings(const UIntVector& inSubsetGlyphIDs)
{
	/*
		1. build the charstrings data, looping the glyphs charstrings and writing a flattened
		   version of each charstring
		2. write the charstring index based on offsets inside the data (size should be according to the max)
		3. copy the data into the stream
	*/


	unsigned long* offsets = new unsigned long[inSubsetGlyphIDs.size() + 1];
	MyStringBuf charStringsData;
	OutputStringBufferStream charStringsDataWriteStream(&charStringsData);
	CharStringType2Flattener charStringFlattener;
	UIntVector::const_iterator itGlyphs = inSubsetGlyphIDs.begin();
	EStatusCode status = PDFHummus::eSuccess;

	do
	{
		unsigned short i=0;
		for(; itGlyphs != inSubsetGlyphIDs.end() && PDFHummus::eSuccess == status; ++itGlyphs,++i)
		{
			offsets[i] = (unsigned long)charStringsDataWriteStream.GetCurrentPosition();
			status = charStringFlattener.WriteFlattenedGlyphProgram(	0,
																		*itGlyphs,
																		&(mOpenTypeInput.mCFF),
																		&charStringsDataWriteStream);
		}
		if(status != PDFHummus::eSuccess)
			break;

		offsets[i] = (unsigned long)charStringsDataWriteStream.GetCurrentPosition();

		charStringsData.pubseekoff(0,std::ios_base::beg);

		// write index section
		mCharStringPosition = mFontFileStream.GetCurrentPosition();
		Byte sizeOfOffset = GetMostCompressedOffsetSize(offsets[i] + 1);
		mPrimitivesWriter.WriteCard16((unsigned short)inSubsetGlyphIDs.size());
		mPrimitivesWriter.WriteOffSize(sizeOfOffset);
		mPrimitivesWriter.SetOffSize(sizeOfOffset);
		for(i=0;i<=inSubsetGlyphIDs.size();++i)
			mPrimitivesWriter.WriteOffset(offsets[i] + 1);

		// Write data
		InputStringBufferStream charStringsDataReadStream(&charStringsData);
		OutputStreamTraits streamCopier(&mFontFileStream);
		status = streamCopier.CopyToOutputStream(&charStringsDataReadStream);
		if(status != PDFHummus::eSuccess)
			break;
	}while(false);

	delete[] offsets;
	return status;
}
Code example #8
EStatusCode CFFEmbeddedFontWriter::WriteFDSelect(const UIntVector& inSubsetGlyphIDs,const FontDictInfoToByteMap& inNewFontDictsIndexes)
{
	// always write format 3, because in most cases the FD dicts count will be so low that it'd
	// take a bloody miracle for no repeats to occur.
	UIntVector::const_iterator itGlyphs = inSubsetGlyphIDs.begin();


	mFDSelectPosition = mFontFileStream.GetCurrentPosition();
	mPrimitivesWriter.WriteCard8(3);

	LongFilePositionType rangesCountPosition = mFontFileStream.GetCurrentPosition();
	mPrimitivesWriter.WriteCard16(1); // temporary. will get back to this later

	unsigned short rangesCount = 1;
	Byte currentFD,newFD;
	unsigned short glyphIndex = 1;
	FontDictInfoToByteMap::const_iterator itNewIndex = 
		inNewFontDictsIndexes.find(mOpenTypeInput.mCFF.mTopDictIndex[0].mFDSelect[*itGlyphs]);
	
	// OK, it seems like I'm probably just imagining exceptions here. I guess there must
	// be a proper FDSelect with FDs for all glyphs...so I'm defaulting to 0
	currentFD = (itNewIndex == inNewFontDictsIndexes.end() ? 0:itNewIndex->second);
	mPrimitivesWriter.WriteCard16(0);
	mPrimitivesWriter.WriteCard8(currentFD);
	++itGlyphs;

	for(; itGlyphs != inSubsetGlyphIDs.end(); ++itGlyphs,++glyphIndex)
	{
		itNewIndex = 
				inNewFontDictsIndexes.find(mOpenTypeInput.mCFF.mTopDictIndex[0].mFDSelect[*itGlyphs]);
		newFD = (itNewIndex == inNewFontDictsIndexes.end() ? 0:itNewIndex->second);
		if(newFD != currentFD)
		{
			currentFD = newFD;
			mPrimitivesWriter.WriteCard16(glyphIndex);
			mPrimitivesWriter.WriteCard8(currentFD);
			++rangesCount;
		}
	}
	mPrimitivesWriter.WriteCard16((unsigned short)inSubsetGlyphIDs.size());
	// go back to ranges count if not equal to what's already written
	if(rangesCount != 1)
	{
		LongFilePositionType currentPosition = mFontFileStream.GetCurrentPosition();
		mFontFileStream.SetPosition(rangesCountPosition);
		mPrimitivesWriter.WriteCard16(rangesCount);
		mFontFileStream.SetPosition(currentPosition);
	}
	return mPrimitivesWriter.GetInternalState();
}
Code example #9
EStatusCode CFFEmbeddedFontWriter::AddDependentGlyphs(UIntVector& ioSubsetGlyphIDs)
{
	EStatusCode status = PDFHummus::eSuccess;
	UIntSet glyphsSet;
	UIntVector::iterator it = ioSubsetGlyphIDs.begin();
	bool hasCompositeGlyphs = false;

	for(;it != ioSubsetGlyphIDs.end() && PDFHummus::eSuccess == status; ++it)
	{
		bool localHasCompositeGlyphs;
		status = AddComponentGlyphs(*it,glyphsSet,localHasCompositeGlyphs);
		hasCompositeGlyphs |= localHasCompositeGlyphs;
	}

	if(hasCompositeGlyphs)
	{
		UIntSet::iterator itNewGlyphs;

		for(it = ioSubsetGlyphIDs.begin();it != ioSubsetGlyphIDs.end(); ++it)
			glyphsSet.insert(*it);

		ioSubsetGlyphIDs.clear();
		for(itNewGlyphs = glyphsSet.begin(); itNewGlyphs != glyphsSet.end(); ++itNewGlyphs)
			ioSubsetGlyphIDs.push_back(*itNewGlyphs);
		
		sort(ioSubsetGlyphIDs.begin(),ioSubsetGlyphIDs.end());
	}	
	return status;
}
Code example #10
EStatusCode TrueTypeEmbeddedFontWriter::WriteGlyf(const UIntVector& inSubsetGlyphIDs,unsigned long* inLocaTable)
{
	// OK, write the glyf table. Only the glyphs actually being used need to be written.
	// While at it...update the loca table.

	TableEntry* tableEntry = mTrueTypeInput.GetTableEntry("glyf");
	LongFilePositionType startTableOffset = mFontFileStream.GetCurrentPosition();
	UIntVector::const_iterator it = inSubsetGlyphIDs.begin();
	OutputStreamTraits streamCopier(&mFontFileStream);
	unsigned short glyphIndex,previousGlyphIndexEnd = 0;
	inLocaTable[0] = 0;
	EStatusCode status = eSuccess;

	for(;it != inSubsetGlyphIDs.end() && eSuccess == status; ++it)
	{
		glyphIndex = *it;
		if(glyphIndex >= mTrueTypeInput.mMaxp.NumGlyphs)
		{
			TRACE_LOG2("TrueTypeEmbeddedFontWriter::WriteGlyf, error, requested glyph index %ld is larger than the maximum glyph index for this font which is %ld. ",glyphIndex,mTrueTypeInput.mMaxp.NumGlyphs-1);
			status = eFailure;
			break;
		}

		for(unsigned short i= previousGlyphIndexEnd + 1; i<=glyphIndex;++i)
			inLocaTable[i] = inLocaTable[previousGlyphIndexEnd];
		if(mTrueTypeInput.mGlyf[glyphIndex] != NULL)
		{
			mTrueTypeFile.GetInputStream()->SetPosition(tableEntry->Offset + 
															mTrueTypeInput.mLoca[glyphIndex]);
			streamCopier.CopyToOutputStream(mTrueTypeFile.GetInputStream(),
				mTrueTypeInput.mLoca[(glyphIndex) + 1] - mTrueTypeInput.mLoca[glyphIndex]);
		}
		inLocaTable[glyphIndex + 1] = (unsigned long)(mFontFileStream.GetCurrentPosition() - startTableOffset);
		previousGlyphIndexEnd = glyphIndex + 1;
	}

	LongFilePositionType endOfTable = mFontFileStream.GetCurrentPosition();
	mPrimitivesWriter.PadTo4();
	LongFilePositionType endOfStream = mFontFileStream.GetCurrentPosition();

	// write table entry data, which includes movement
	WriteTableEntryData(mGLYFEntryWritingOffset,
						startTableOffset,
						(unsigned long)(endOfTable - startTableOffset));

	// restore position to end of stream
	mFontFileStream.SetPosition(endOfStream); 

	return mPrimitivesWriter.GetInternalState();	
}
Code example #11
void CFFEmbeddedFontWriter::DetermineFDArrayIndexes(const UIntVector& inSubsetGlyphIDs,FontDictInfoToByteMap& outNewFontDictsIndexes)
{
	UIntVector::const_iterator itGlyphs = inSubsetGlyphIDs.begin();
	FontDictInfoSet fontDictInfos;

	for(; itGlyphs != inSubsetGlyphIDs.end(); ++itGlyphs)
		if(mOpenTypeInput.mCFF.mTopDictIndex[0].mFDSelect[*itGlyphs])
			fontDictInfos.insert(mOpenTypeInput.mCFF.mTopDictIndex[0].mFDSelect[*itGlyphs]);

	FontDictInfoSet::iterator itFontInfos;
	Byte i=0;

	for(itFontInfos = fontDictInfos.begin(); itFontInfos != fontDictInfos.end(); ++itFontInfos,++i)
		outNewFontDictsIndexes.insert(FontDictInfoToByteMap::value_type(*itFontInfos,i));
}
Code example #12
void TowerTemporal::update(float t)
{
  static const float sShotFadeOut = 0.5f;
  mTimeSinceLastAction += t;
  float atkDur = 1.0f/mAtkSpeed;
  if (mTimeSinceLastAction >= atkDur)
  {
    // Get all Aliens within the tower's range
    Alien* alien = NULL;
    UIntVector ids = getTargetsInRange();
    for (size_t i = 0; i < ids.size(); ++i)
    {
      unsigned int id = ids[i];
      alien = AlienFactory::getSingleton().getAlien(id);
      if (alien && alien->getState() != Alien::DYING && alien->getState() != Alien::DEAD)
        alien->slow(0.5f, atkDur*0.5f);
    }

    // Reset the scale of the shot graphics
    mpShotGraphics->getParentSceneNode()->setScale(Ogre::Vector3::UNIT_SCALE);
    mpShotGraphics->setVisible(true);

    // Reset the time since last action
    mTimeSinceLastAction = 0;
  }
  
  // Update the shot graphics
  if (mTimeSinceLastAction < sShotFadeOut)
  {
    float shotPerc = mTimeSinceLastAction / sShotFadeOut; 
    Ogre::Vector3 scale = Ogre::Vector3::UNIT_SCALE * Ogre::Math::Sqrt(mRangeSqr) * 0.5;
    scale = scale * Ogre::Math::Sqrt(shotPerc);
    mpShotGraphics->getParentSceneNode()->setScale(scale);
    mpShotGraphics->beginUpdate(0);
    for (size_t i = 0; i < mRingVertices.size(); ++i)
    {
      mpShotGraphics->position(mRingVertices[i]);
      mpShotGraphics->colour(0.33, 0.33, 1, 1-shotPerc);
    }
    mpShotGraphics->end();
  }
  else
  {
    mpShotGraphics->setVisible(false);
  }
}
Code example #13
File: KNearest.cpp  Project: snailcoder/PASS
UIntVector KNearest::Nearest(const RealCoord &state, 
						     const RealCoordVector &state_set) const {
  UIntVector neighbors;
  std::size_t state_num = state_set.size();
  if (state_num <= k_) {
    for (unsigned i = 0; i < state_num; ++i) {
      neighbors.push_back(i);
    }
  } else {
    double *dist = new double[state_num];
    for (unsigned i = 0; i < state_num; ++i) {
      dist[i] = metric_->Distance(state, state_set[i]);
    }
    for (unsigned i = 0; i <= k_; ++i) {
      neighbors.push_back(i);
    }
    for (unsigned i = 1; i < k_; ++i) {
      double di = dist[i];
      unsigned j = i - 1;
      while (j != std::numeric_limits<unsigned>::max() && dist[i] < dist[j]) {
        dist[j + 1] = dist[j];
        neighbors[j + 1] = neighbors[j];
        --j;
      }
      dist[j + 1] = di;
      neighbors[j + 1] = i;
    }
    for (unsigned i = k_; i < state_num; ++i) {
      double di = dist[i];
      unsigned j = k_ - 1;
      while (j != std::numeric_limits<unsigned>::max() && dist[i] < dist[j]) {
        dist[j + 1] = dist[j];
        neighbors[j + 1] = neighbors[j];
        --j;
      }
      neighbors[j + 1] = i;
    }
    delete[] dist;
  }
  neighbors.pop_back();
  return neighbors;
}
Code example #14
File: WaveletTree.cpp  Project: iborko/fmindex
char WaveletTree::calc_pivot(const std::string& alph, const CharIntMap& counts) {

    UIntVector scores;
    scores.resize(alph.size(), 0);

    for (std::size_t i = 0; i < alph.size(); i++) {

        // calc the number of 0s and 1s
        UInt sum_before = 0;
        UInt sum_after = 0;
        for (std::size_t j = 0; j < i; j++) {
            // operator[] can't be used in const map, so I had to use
            // find to get iterator
            CharIntMap::const_iterator elem = counts.find(alph[j]);
            if (elem == counts.end())
                throw std::runtime_error("counts has missing key");
            sum_before += elem->second;
        }
        for (std::size_t j = i; j < alph.size(); j++) {
            CharIntMap::const_iterator elem = counts.find(alph[j]);
            if (elem == counts.end())
                throw std::runtime_error("counts has missing key");
            sum_after += elem->second;
        }

        // absolute difference between the numbers of 0s and 1s
        // (computed branch-wise, since the sums are unsigned and plain subtraction could wrap)
        scores[i] = (sum_before > sum_after) ? (sum_before - sum_after)
                                             : (sum_after - sum_before);
        
        // if the current score is worse than the previous one, then the previous split is the best;
        // continuing will only give worse scores
        if (i > 0 && scores[i] > scores[i-1])
            return alph[i-1];
    }

    return alph[alph.size() - 1];
}
Code example #15
void TrueTypeEmbeddedFontWriter::AddDependentGlyphs(UIntVector& ioSubsetGlyphIDs)
{
	UIntSet glyphsSet;
	UIntVector::iterator it = ioSubsetGlyphIDs.begin();
	bool hasCompositeGlyphs = false;

	for(;it != ioSubsetGlyphIDs.end(); ++it)
		hasCompositeGlyphs |= AddComponentGlyphs(*it,glyphsSet);

	if(hasCompositeGlyphs)
	{
		UIntSet::iterator itNewGlyphs;

		for(it = ioSubsetGlyphIDs.begin();it != ioSubsetGlyphIDs.end(); ++it)
			glyphsSet.insert(*it);

		ioSubsetGlyphIDs.clear();
		for(itNewGlyphs = glyphsSet.begin(); itNewGlyphs != glyphsSet.end(); ++itNewGlyphs)
			ioSubsetGlyphIDs.push_back(*itNewGlyphs);
		
		sort(ioSubsetGlyphIDs.begin(),ioSubsetGlyphIDs.end());
	}
}
Code example #16
File: test_vec.cpp  Project: TobiasLundin/Cpp
int main()
{
    // A few things that should work:
    UIntVector a(10);               // initialization with 10 elements
    std::cout << "a(10)"<< a.length << std::endl;
    std::cout << "kopiering" << std::endl;
    UIntVector b(a);           // copy constructor
    std::cout << "kopiering" << std::endl;
    a = a;
    std::cout << "s**t" << std::endl;
    UIntVector c = a;          // copy constructor

    // Extra tests for all requirements
    a = b;                 // assignment by copying
    a[5] = 7;              // assignment to an element

    const UIntVector e(100000);    // constant object with 100000 elements
    int i = e[5];          // const int operator[](int) const is invoked
    i = a[0];              // the vector is zero-indexed
    i = a[5];              // int operator[](int) is invoked

    a[5]++;                // increase the value to 8

    // Extra tests for all requirements
    std::cout << "(1)TEST" << std::endl;
    int aa = e[9];
    int ab = e[0];
    std::cout << "(1)S**T" << aa << ab << std::endl;


    std::cout << "(2)TEST" << std::endl;
    for(long int i = 0; i < 100000; i++)
    {
        e[i];
    } 
    std::cout << "(2)S**T" << std::endl;




    std::cout << "(3)TEST" << std::endl;
    UIntVector a3(10); UIntVector b3(0); UIntVector c3(0);
    b3 = a3;
    a3 = c3;
    std::cout << "(3)S**T" << std::endl;




    std::cout << "(4) START" << std::endl;
    std::initializer_list<unsigned int> list = {1,2,3};
    UIntVector a4(list); UIntVector b4(0);
    a4 = b4;
    std::cout << "length a" << a4.size() << "len b " << b4.size() << std::endl;
    std::cout << "(4) S**T" << std::endl;



    std::cout << "(5)TEST" << std::endl;
    UIntVector b5(list);
    UIntVector a5(std::move(b5));
    std::cout << "(5)S**T" << std::endl;





    std::cout << "(6)TEST" << std::endl;
    UIntVector a6(30);
    UIntVector b6(a6);
    std::cout << "(6)S**T" << std::endl;


    std::cout << "(7)TEST" << std::endl;
    UIntVector a7(1); 
    std::cout << "a) len innan " <<a7.length << std::endl;
    UIntVector b7(std::move(a7));
    std::cout << "b) len " <<b7.length << std::endl;
    std::cout << "a) len " <<a7.length << std::endl;
    std::cout << "(7)S**T" << std::endl;

    std::cout << "(8)TEST" << std::endl;
    UIntVector a8(10);
    a8.reset();
    UIntVector b8(11);
    std::cout << "a) INNAN len " <<a8.size() << "ptr " << a8.vector_ptr <<std::endl;
    UIntVector c8(std::move(a8));
    std::cout << "c) len " <<c8.size() << "ptr" << c8.vector_ptr <<std::endl;
    std::cout << "a) len " <<a8.size() << "ptr " << a8.vector_ptr <<std::endl;
    std::cout << "(8)S**T" << std::endl;


    std::cout << "(9)TEST COPY TO SELF" << std::endl;
    b8 = b8;
    std::cout << "(9)S**T" << std::endl;
    try {
        i = e[10];             // tries to access an element outside e
    } catch (const std::out_of_range& err) {
        std::cout << err.what() << std::endl;
    }

    
#if 0
    // Various things to test
    e[5] = 3;              // error: (does not compile) assignment to const
    b = b;                 // hmm: make sure the memory that should be kept is not freed
#endif

    return 0;
}
Code example #17
File: Subdivision.cpp  Project: 1vanK/Urho3D
// ------------------------------------------------------------------------------------------------
// Note - this is an implementation of the standard (recursive) Cm-Cl algorithm without further
// optimizations (except we're using some nice LUTs). A description of the algorithm can be found
// here: http://en.wikipedia.org/wiki/Catmull-Clark_subdivision_surface
//
// The code is mostly O(n), however parts are O(nlogn) which is therefore the algorithm's
// expected total runtime complexity. The implementation is able to work in-place on the same
// mesh arrays. Calling #InternSubdivide() directly is not encouraged. The code can operate
// in-place unless 'smesh' and 'out' are equal (no strange overlaps or reorderings).
// Previous data is replaced/deleted then.
// ------------------------------------------------------------------------------------------------
void CatmullClarkSubdivider::InternSubdivide (
    const aiMesh* const * smesh,
    size_t nmesh,
    aiMesh** out,
    unsigned int num
    )
{
    ai_assert(NULL != smesh && NULL != out);
    INIT_EDGE_HASH_TEMPORARIES();

    // no subdivision requested or end of recursive refinement
    if (!num) {
        return;
    }

    UIntVector maptbl;
    SpatialSort spatial;

    // ---------------------------------------------------------------------
    // 0. Offset table to index all meshes continuously, generate a spatially
    // sorted representation of all vertices in all meshes.
    // ---------------------------------------------------------------------
    typedef std::pair<unsigned int,unsigned int> IntPair;
    std::vector<IntPair> moffsets(nmesh);
    unsigned int totfaces = 0, totvert = 0;
    for (size_t t = 0; t < nmesh; ++t) {
        const aiMesh* mesh = smesh[t];

        spatial.Append(mesh->mVertices,mesh->mNumVertices,sizeof(aiVector3D),false);
        moffsets[t] = IntPair(totfaces,totvert);

        totfaces += mesh->mNumFaces;
        totvert  += mesh->mNumVertices;
    }

    spatial.Finalize();
    const unsigned int num_unique = spatial.GenerateMappingTable(maptbl,ComputePositionEpsilon(smesh,nmesh));


#define FLATTEN_VERTEX_IDX(mesh_idx, vert_idx) (moffsets[mesh_idx].second+vert_idx)
#define   FLATTEN_FACE_IDX(mesh_idx, face_idx) (moffsets[mesh_idx].first+face_idx)

    // ---------------------------------------------------------------------
    // 1. Compute the centroid point for all faces
    // ---------------------------------------------------------------------
    std::vector<Vertex> centroids(totfaces);
    unsigned int nfacesout = 0;
    for (size_t t = 0, n = 0; t < nmesh; ++t) {
        const aiMesh* mesh = smesh[t];
        for (unsigned int i = 0; i < mesh->mNumFaces;++i,++n)
        {
            const aiFace& face = mesh->mFaces[i];
            Vertex& c = centroids[n];

            for (unsigned int a = 0; a < face.mNumIndices;++a) {
                c += Vertex(mesh,face.mIndices[a]);
            }

            c /= static_cast<float>(face.mNumIndices);
            nfacesout += face.mNumIndices;
        }
    }

    {
    // we want edges to go away before the recursive calls so begin a new scope
    EdgeMap edges;

    // ---------------------------------------------------------------------
    // 2. Set each edge point to be the average of all neighbouring
    // face points and original points. Every edge exists twice
    // if there is a neighboring face.
    // ---------------------------------------------------------------------
    for (size_t t = 0; t < nmesh; ++t) {
        const aiMesh* mesh = smesh[t];

        for (unsigned int i = 0; i < mesh->mNumFaces;++i)   {
            const aiFace& face = mesh->mFaces[i];

            for (unsigned int p =0; p< face.mNumIndices; ++p) {
                const unsigned int id[] = {
                    face.mIndices[p],
                    face.mIndices[p==face.mNumIndices-1?0:p+1]
                };
                const unsigned int mp[] = {
                    maptbl[FLATTEN_VERTEX_IDX(t,id[0])],
                    maptbl[FLATTEN_VERTEX_IDX(t,id[1])]
                };

                Edge& e = edges[MAKE_EDGE_HASH(mp[0],mp[1])];
                e.ref++;
                if (e.ref<=2) {
                    if (e.ref==1) { // original points (end points) - add only once
                        e.edge_point = e.midpoint = Vertex(mesh,id[0])+Vertex(mesh,id[1]);
                        e.midpoint *= 0.5f;
                    }
                    e.edge_point += centroids[FLATTEN_FACE_IDX(t,i)];
                }
            }
        }
    }

    // ---------------------------------------------------------------------
    // 3. Normalize edge points
    // ---------------------------------------------------------------------
    {unsigned int bad_cnt = 0;
    for (EdgeMap::iterator it = edges.begin(); it != edges.end(); ++it) {
        if ((*it).second.ref < 2) {
            ai_assert((*it).second.ref);
            ++bad_cnt;
        }
        (*it).second.edge_point *= 1.f/((*it).second.ref+2.f);
    }

    if (bad_cnt) {
        // Report the number of bad edges. Bad edges are referenced by fewer than two
        // faces in the mesh. They occur at outer model boundaries in non-closed
        // shapes.
        char tmp[512];
        ai_snprintf(tmp, 512, "Catmull-Clark Subdivider: got %u bad edges touching only one face (totally %u edges). ",
            bad_cnt,static_cast<unsigned int>(edges.size()));

        DefaultLogger::get()->debug(tmp);
    }}

    // ---------------------------------------------------------------------
    // 4. Compute a vertex-face adjacency table. We can't reuse the code
    // from VertexTriangleAdjacency because we need the table for multiple
    // meshes and out vertex indices need to be mapped to distinct values
    // first.
    // ---------------------------------------------------------------------
    UIntVector faceadjac(nfacesout), cntadjfac(maptbl.size(),0), ofsadjvec(maptbl.size()+1,0); {
    for (size_t t = 0; t < nmesh; ++t) {
        const aiMesh* const minp = smesh[t];
        for (unsigned int i = 0; i < minp->mNumFaces; ++i) {

            const aiFace& f = minp->mFaces[i];
            for (unsigned int n = 0; n < f.mNumIndices; ++n) {
                ++cntadjfac[maptbl[FLATTEN_VERTEX_IDX(t,f.mIndices[n])]];
            }
        }
    }
    unsigned int cur = 0;
    for (size_t i = 0; i < cntadjfac.size(); ++i) {
        ofsadjvec[i+1] = cur;
        cur += cntadjfac[i];
    }
    for (size_t t = 0; t < nmesh; ++t) {
        const aiMesh* const minp = smesh[t];
        for (unsigned int i = 0; i < minp->mNumFaces; ++i) {

            const aiFace& f = minp->mFaces[i];
            for (unsigned int n = 0; n < f.mNumIndices; ++n) {
                faceadjac[ofsadjvec[1+maptbl[FLATTEN_VERTEX_IDX(t,f.mIndices[n])]]++] = FLATTEN_FACE_IDX(t,i);
            }
        }
    }

    // check the other way round for consistency
#ifdef ASSIMP_BUILD_DEBUG

    for (size_t t = 0; t < ofsadjvec.size()-1; ++t) {
        for (unsigned int m = 0; m <  cntadjfac[t]; ++m) {
            const unsigned int fidx = faceadjac[ofsadjvec[t]+m];
            ai_assert(fidx < totfaces);
            for (size_t n = 1; n < nmesh; ++n) {

                if (moffsets[n].first > fidx) {
                    const aiMesh* msh = smesh[--n];
                    const aiFace& f = msh->mFaces[fidx-moffsets[n].first];

                    bool haveit = false;
                    for (unsigned int i = 0; i < f.mNumIndices; ++i) {
                        if (maptbl[FLATTEN_VERTEX_IDX(n,f.mIndices[i])]==(unsigned int)t) {
                            haveit = true;
                            break;
                        }
                    }
                    ai_assert(haveit);
                    if (!haveit) {
                        DefaultLogger::get()->debug("Catmull-Clark Subdivider: Index not used");
                    }
                    break;
                }
            }
        }
    }

#endif
    }

#define GET_ADJACENT_FACES_AND_CNT(vidx,fstartout,numout) \
    fstartout = &faceadjac[ofsadjvec[vidx]], numout = cntadjfac[vidx]

    typedef std::pair<bool,Vertex> TouchedOVertex;
    std::vector<TouchedOVertex > new_points(num_unique,TouchedOVertex(false,Vertex()));
    // ---------------------------------------------------------------------
    // 5. Spawn a quad from each face point to the corresponding edge points
    // the original points being the fourth quad points.
    // ---------------------------------------------------------------------
    for (size_t t = 0; t < nmesh; ++t) {
        const aiMesh* const minp = smesh[t];
        aiMesh* const mout = out[t] = new aiMesh();

        for (unsigned int a  = 0; a < minp->mNumFaces; ++a) {
            mout->mNumFaces += minp->mFaces[a].mNumIndices;
        }

        // We need random access to the old face buffer, so reuse is not possible.
        mout->mFaces = new aiFace[mout->mNumFaces];

        mout->mNumVertices = mout->mNumFaces*4;
        mout->mVertices = new aiVector3D[mout->mNumVertices];

        // quads only, keep material index
        mout->mPrimitiveTypes = aiPrimitiveType_POLYGON;
        mout->mMaterialIndex = minp->mMaterialIndex;

        if (minp->HasNormals()) {
            mout->mNormals = new aiVector3D[mout->mNumVertices];
        }

        if (minp->HasTangentsAndBitangents()) {
            mout->mTangents = new aiVector3D[mout->mNumVertices];
            mout->mBitangents = new aiVector3D[mout->mNumVertices];
        }

        for(unsigned int i = 0; minp->HasTextureCoords(i); ++i) {
            mout->mTextureCoords[i] = new aiVector3D[mout->mNumVertices];
            mout->mNumUVComponents[i] = minp->mNumUVComponents[i];
        }

        for(unsigned int i = 0; minp->HasVertexColors(i); ++i) {
            mout->mColors[i] = new aiColor4D[mout->mNumVertices];
        }

        mout->mNumVertices = mout->mNumFaces<<2u;
        for (unsigned int i = 0, v = 0, n = 0; i < minp->mNumFaces;++i) {

            const aiFace& face = minp->mFaces[i];
            for (unsigned int a = 0; a < face.mNumIndices;++a)  {

                // Get a clean new face.
                aiFace& faceOut = mout->mFaces[n++];
                faceOut.mIndices = new unsigned int [faceOut.mNumIndices = 4];

                // Spawn a new quadrilateral (ccw winding) for this original point between:
                // a) face centroid
                centroids[FLATTEN_FACE_IDX(t,i)].SortBack(mout,faceOut.mIndices[0]=v++);

                // b) adjacent edge on the left, seen from the centroid
                const Edge& e0 = edges[MAKE_EDGE_HASH(maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])],
                    maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a==face.mNumIndices-1?0:a+1])
                    ])];  // fixme: replace with mod face.mNumIndices?

                // c) adjacent edge on the right, seen from the centroid
                const Edge& e1 = edges[MAKE_EDGE_HASH(maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])],
                    maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[!a?face.mNumIndices-1:a-1])
                    ])];  // fixme: replace with mod face.mNumIndices?

                e0.edge_point.SortBack(mout,faceOut.mIndices[3]=v++);
                e1.edge_point.SortBack(mout,faceOut.mIndices[1]=v++);

                // d) original point P with distinct index i
                // F := 0
                // R := 0
                // n := 0
                // for each face f containing i
                //    F := F+ centroid of f
                //    R := R+ midpoint of edge of f from i to i+1
                //    n := n+1
                //
                // (F+2R+(n-3)P)/n
                const unsigned int org = maptbl[FLATTEN_VERTEX_IDX(t,face.mIndices[a])];
                TouchedOVertex& ov = new_points[org];

                if (!ov.first) {
                    ov.first = true;

                    const unsigned int* adj; unsigned int cnt;
                    GET_ADJACENT_FACES_AND_CNT(org,adj,cnt);

                    if (cnt < 3) {
                        ov.second = Vertex(minp,face.mIndices[a]);
                    }
                    else {

                        Vertex F,R;
                        for (unsigned int o = 0; o < cnt; ++o) {
                            ai_assert(adj[o] < totfaces);
                            F += centroids[adj[o]];

                            // adj[0] is a global face index - search the face in the mesh list
                            const aiMesh* mp = NULL;
                            size_t nidx;

                            if (adj[o] < moffsets[0].first) {
                                mp = smesh[nidx=0];
                            }
                            else {
                                for (nidx = 1; nidx<= nmesh; ++nidx) {
                                    if (nidx == nmesh ||moffsets[nidx].first > adj[o]) {
                                        mp = smesh[--nidx];
                                        break;
                                    }
                                }
                            }

                            ai_assert(adj[o]-moffsets[nidx].first < mp->mNumFaces);
                            const aiFace& f = mp->mFaces[adj[o]-moffsets[nidx].first];
                            bool haveit = false;

                            // find our original point in the face
                            for (unsigned int m = 0; m < f.mNumIndices; ++m) {
                                if (maptbl[FLATTEN_VERTEX_IDX(nidx,f.mIndices[m])] == org) {

                                    // add *both* edges. This way, we can be sure that we add
                                    // *all* adjacent edges to R. In a closed shape, every
                                    // edge is added twice - so we simply leave out the
                                    // factor 2.f in the above formula and get the right
                                    // result.

                                    const Edge& c0 = edges[MAKE_EDGE_HASH(org,maptbl[FLATTEN_VERTEX_IDX(
                                        nidx,f.mIndices[!m?f.mNumIndices-1:m-1])])];
                                    // fixme: replace with mod face.mNumIndices?

                                    const Edge& c1 = edges[MAKE_EDGE_HASH(org,maptbl[FLATTEN_VERTEX_IDX(
                                        nidx,f.mIndices[m==f.mNumIndices-1?0:m+1])])];
                                    // fixme: replace with mod face.mNumIndices?
                                    R += c0.midpoint+c1.midpoint;

                                    haveit = true;
                                    break;
                                }
                            }

                            // this invariant *must* hold if the vertex-to-face adjacency table is valid
                            ai_assert(haveit);
                            if ( !haveit ) {
                                DefaultLogger::get()->warn( "OBJ: no name for material library specified." );
                            }
                        }

                        const float div = static_cast<float>(cnt), divsq = 1.f/(div*div);
                        ov.second = Vertex(minp,face.mIndices[a])*((div-3.f) / div) + R*divsq + F*divsq;
                    }
                }
                ov.second.SortBack(mout,faceOut.mIndices[2]=v++);
            }
        }
    }
    }  // end of scope for edges, freeing its memory

    // ---------------------------------------------------------------------
    // 7. Apply the next subdivision step.
    // ---------------------------------------------------------------------
    if (num != 1) {
        std::vector<aiMesh*> tmp(nmesh);
        InternSubdivide (out,nmesh,&tmp.front(),num-1);
        for (size_t i = 0; i < nmesh; ++i) {
            delete out[i];
            out[i] = tmp[i];
        }
    }
}
Code example #18
EStatusCode Type1ToCFFEmbeddedFontWriter::CreateCFFSubset(	
															FreeTypeFaceWrapper& inFontInfo,
															const UIntVector& inSubsetGlyphIDs,
															const std::string& inSubsetFontName,
															bool& outNotEmbedded,
															MyStringBuf& outFontProgram)
{
	EStatusCode status;

	do
	{
		UIntVector subsetGlyphIDs = inSubsetGlyphIDs;
		StringVector subsetGlyphNames;

		if(subsetGlyphIDs.front() != 0) // make sure 0 glyph is in
			subsetGlyphIDs.insert(subsetGlyphIDs.begin(),0);

		status = mType1File.OpenFile(inFontInfo.GetFontFilePath());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG1("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, cannot open Type 1 font file at %s",inFontInfo.GetFontFilePath().c_str());
			break;
		}

		status = mType1Input.ReadType1File(mType1File.GetInputStream());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to read Type 1 file");
			break;
		}

		// see if font may be embedded
		if(mType1Input.mFontDictionary.FSTypeValid || mType1Input.mFontInfoDictionary.FSTypeValid)
		{
			if(!FSType(
					mType1Input.mFontInfoDictionary.FSTypeValid ? 
						mType1Input.mFontInfoDictionary.fsType :
						mType1Input.mFontDictionary.fsType).CanEmbed())
			{
				outNotEmbedded = true;
				return PDFHummus::eSuccess;
			}
			else
				outNotEmbedded = false;
		}
		else
			outNotEmbedded = false;


		// There is a big gap between FreeType indexing and the way it's done in Type 1, obviously due to encoding differences.
		// So I'm replacing the FreeType indexes with names...should be safer (also cleans up invalid glyph IDs,
		// in case direct glyph placement put them here)
        TranslateFromFreeTypeToType1(inFontInfo,subsetGlyphIDs,subsetGlyphNames);

		status = AddDependentGlyphs(subsetGlyphNames);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to add dependent glyphs");
			break;
		}

		mFontFileStream.Assign(&outFontProgram);
		mPrimitivesWriter.SetStream(&mFontFileStream);

		status = WriteCFFHeader();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write CFF header");
			break;
		}

		status = WriteName(inSubsetFontName);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write CFF Name");
			break;
		}

		status = WriteTopIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write Top Index");
			break;
		}

		// preparing the charset happens here, so that any strings added to the string index are in place...before
		// the index is written
		PrepareCharSetArray(subsetGlyphNames);

		status = WriteStringIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write String Index");
			break;
		}

		status = WriteGlobalSubrsIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write global subrs index");
			break;
		}

		status = WriteEncodings(subsetGlyphNames);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write encodings");
			break;
		}

		status = WriteCharsets(subsetGlyphNames);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write charstring");
			break;
		}

		status = WriteCharStrings(subsetGlyphNames);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write charstring");
			break;
		}

		status = WritePrivateDictionary();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to write private");
			break;
		}

		status = UpdateIndexesAtTopDict();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("Type1ToCFFEmbeddedFontWriter::CreateCFFSubset, failed to update indexes");
			break;			
		}
	}while(false);

	mType1File.CloseFile();
	FreeTemporaryStructs();
	return status;	
}
Code example #19
EStatusCode CFFEmbeddedFontWriter::CreateCFFSubset(	
									FreeTypeFaceWrapper& inFontInfo,
									const UIntVector& inSubsetGlyphIDs,
									UShortVector* inCIDMapping,
									const std::string& inSubsetFontName,
									bool& outNotEmbedded,
									MyStringBuf& outFontProgram)
{
	EStatusCode status;

	do
	{

		status = mOpenTypeFile.OpenFile(inFontInfo.GetFontFilePath());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG1("CFFEmbeddedFontWriter::CreateCFFSubset, cannot open type font file at %s",inFontInfo.GetFontFilePath().c_str());
			break;
		}

		status = mOpenTypeInput.ReadOpenTypeFile(mOpenTypeFile.GetInputStream(),(unsigned short)inFontInfo.GetFontIndex());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to read true type file");
			break;
		}

		if(mOpenTypeInput.GetOpenTypeFontType() != EOpenTypeCFF)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, font file is not CFF, so there is an exceptions here. expecting CFFs only");
			break;
		}

		// see if font may be embedded
		if(mOpenTypeInput.mOS2Exists && !FSType(mOpenTypeInput.mOS2.fsType).CanEmbed())
		{
			outNotEmbedded = true;
			return PDFHummus::eSuccess;
		}
		else
			outNotEmbedded = false;

		UIntVector subsetGlyphIDs = inSubsetGlyphIDs;
		if(subsetGlyphIDs.front() != 0) // make sure 0 glyph is in
			subsetGlyphIDs.insert(subsetGlyphIDs.begin(),0);

		status = AddDependentGlyphs(subsetGlyphIDs);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to add dependent glyphs");
			break;
		}

		mIsCID = mOpenTypeInput.mCFF.mTopDictIndex[0].mTopDict.find(scROS) != 
					mOpenTypeInput.mCFF.mTopDictIndex[0].mTopDict.end();

		mFontFileStream.Assign(&outFontProgram);
		mPrimitivesWriter.SetStream(&mFontFileStream);

		status = WriteCFFHeader();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write CFF header");
			break;
		}

		status = WriteName(inSubsetFontName);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write CFF Name");
			break;
		}

		status = WriteTopIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write Top Index");
			break;
		}

		status = WriteStringIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write String Index");
			break;
		}

		status = WriteGlobalSubrsIndex();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write global subrs index");
			break;
		}

		status = WriteEncodings(inSubsetGlyphIDs);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write encodings");
			break;
		}

		status = WriteCharsets(inSubsetGlyphIDs,inCIDMapping);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write charstring");
			break;
		}

		FontDictInfoToByteMap newFDIndexes;

		if(mIsCID)
		{
			DetermineFDArrayIndexes(inSubsetGlyphIDs,newFDIndexes);
			status = WriteFDSelect(inSubsetGlyphIDs,newFDIndexes);
			if(status != PDFHummus::eSuccess)
				break;
		}


		status = WriteCharStrings(inSubsetGlyphIDs);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write charstring");
			break;
		}

		status = WritePrivateDictionary();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to write private");
			break;
		}

		if(mIsCID)
		{
			status = WriteFDArray(inSubsetGlyphIDs,newFDIndexes);
			if(status != PDFHummus::eSuccess)
				break;
		}

		status = UpdateIndexesAtTopDict();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("CFFEmbeddedFontWriter::CreateCFFSubset, failed to update indexes");
			break;			
		}
	}while(false);

	mOpenTypeFile.CloseFile();
	return status;
}
Code example #20
EStatusCode TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset(	FreeTypeFaceWrapper& inFontInfo, /*consider requiring only the file path...actually i don't need the whole thing*/
																const UIntVector& inSubsetGlyphIDs,
																bool& outNotEmbedded,
																MyStringBuf& outFontProgram)
{
	EStatusCode status;
	unsigned long* locaTable = NULL;

	do
	{
		UIntVector subsetGlyphIDs = inSubsetGlyphIDs;

		status = mTrueTypeFile.OpenFile(inFontInfo.GetFontFilePath());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG1("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, cannot open true type font file at %s",inFontInfo.GetFontFilePath().c_str());
			break;
		}

		status = mTrueTypeInput.ReadOpenTypeFile(mTrueTypeFile.GetInputStream(),(unsigned short)inFontInfo.GetFontIndex());
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to read true type file");
			break;
		}

		if(mTrueTypeInput.GetOpenTypeFontType() != EOpenTypeTrueType)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, font file is not true type, so there is an exceptions here. expecting true types only");
			break;
		}
	
		// see if font may be embedded
		if(mTrueTypeInput.mOS2Exists && !FSType(mTrueTypeInput.mOS2.fsType).CanEmbed())
		{
			outNotEmbedded = true;
			return PDFHummus::eSuccess;
		}
		else
			outNotEmbedded = false;

		AddDependentGlyphs(subsetGlyphIDs);

		// OK, this needs a bit of explaining.
		// I want to leave the glyph IDs as they were in the original font.
		// This allows me to write a more comfortable font definition, something which is generic enough.
		// This assumption requires that the font will contain the glyphs in their original positions.
		// To allow that, when the subset glyph count is smaller than the actual glyph count, I'm
		// padding with 0-length glyphs (their loca entries just don't move).
		// Don't worry - it's perfectly kosher.
		// So - bottom line - the glyph count will actually be 1 more than the maximum glyph index,
		// and from here I'll just place the glyphs at their original indexes, and fill in the
		// vacant glyphs with empties.
		mSubsetFontGlyphsCount = subsetGlyphIDs.back() + 1;
		
		mFontFileStream.Assign(&outFontProgram);
		mPrimitivesWriter.SetOpenTypeStream(&mFontFileStream);

		// also assign to reader streams, so I can read items for checksum calculations
		mFontFileReaderStream.Assign(&outFontProgram);
		mPrimitivesReader.SetOpenTypeStream(&mFontFileReaderStream);


		status = WriteTrueTypeHeader();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write true type header");
			break;
		}

		status = WriteHead();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write head table");
			break;
		}

		status = WriteHHea();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write hhea table");
			break;
		}

		status = WriteHMtx();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write hmtx table");
			break;
		}

		status = WriteMaxp();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write Maxp table");
			break;
		}

		if(mTrueTypeInput.mCVTExists)
		{
			status = WriteCVT();
			if(status != PDFHummus::eSuccess)
			{
				TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write cvt table");
				break;
			}
		}

		if(mTrueTypeInput.mFPGMExists)
		{
			status = WriteFPGM();
			if(status != PDFHummus::eSuccess)
			{
				TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write fpgm table");
				break;
			}
		}

		if(mTrueTypeInput.mPREPExists)
		{
			status = WritePREP();
			if(status != PDFHummus::eSuccess)
			{
				TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write prep table");
				break;	
			}
		}

		status = WriteNAME();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write name table");
			break;	
		}

        if(mTrueTypeInput.mOS2Exists)
        {
            status = WriteOS2();
            if(status != PDFHummus::eSuccess)
            {
                TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write os2 table");
                break;
            }
        }

		status = WriteCMAP();
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write cmap table");
			break;	
		}

		locaTable = new unsigned long[mSubsetFontGlyphsCount+1];

		status = WriteGlyf(subsetGlyphIDs,locaTable);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write prep table");
			break;
		}

		status = WriteLoca(locaTable);
		if(status != PDFHummus::eSuccess)
		{
			TRACE_LOG("TrueTypeEmbeddedFontWriter::CreateTrueTypeSubset, failed to write loca table");
			break;
		}

		status = CreateHeadTableCheckSumAdjustment();
	}while(false);

	delete[] locaTable;
	mTrueTypeFile.CloseFile();
	return status;
}
Code example #21
File: CPythonCounter.cpp  Project: zackkk/MIPS
/*!
* Extracts and stores logical lines of code.
* Determines and extracts logical SLOC to place in the result variable
* using the addSLOC function. Each time the addSLOC function is called,
* a new logical SLOC is added. This function assumes that the directive
* is handled before it is called.
*
* \param result counter results
* \param line processed physical line of code
* \param lineNumber line number of the physical line
* \param lineBak original physical line of code
* \param strLSLOC processed logical string
* \param strLSLOCBak original logical string
* \param paren_cnt count of parentheses
* \param loopWhiteSpace count of white space to determine loop ends
*/
void CPythonCounter::LSLOC(results* result, string line, size_t lineNumber, string lineBak, string &strLSLOC, string &strLSLOCBak,
						   unsigned int &paren_cnt, UIntVector &loopWhiteSpace)
{
#define CONT_STR_LENGTH 18
	string continuation_str[] = {"is", "in", "not", "+", "-", "*", "/", "=", "<", ">", "|", "&", "%", "^", "\\", "~", ",", "$"};
	
	size_t start = 0;	// starting index of the working string
	size_t i = 0, idx, strSize;
	int n;
	bool trunc_flag = false;
	unsigned int cnt = 0, numWS;
	string exclude = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_$";

	string tmp;

	// process:
	// paren_cnt is used with {} [] ()
	// 1. check if the current char is inside one of the parentheses
	// 2.    if not, check if the line has : or ; (statement separators), except else:
	// 3.       if yes, count and put the statement in the result
	// 4. if the line does not end with a continuation string or a statement separator (handled)
	//      and the line is not inside one of the parentheses,
	//      then count and put the statement in the result
	// 5. the physical count considers all lines executable (or directives, no declarations)

	// check for loop ends, new loops, and record white space in order to determine ends
	if (print_cmplx)
	{
		// check white space for loop ends
		if (loopWhiteSpace.size() > 0)
		{
			// get white space
			tmp = line;
			tmp = CUtil::TrimString(tmp, -1);
			numWS = (unsigned)(line.length() - tmp.length());

			// check for loop ends
			for (n = (int)loopWhiteSpace.size() - 1; n >= 0; n--)
			{
				if (loopWhiteSpace.at(n) != numWS)
					break;
				else
					loopWhiteSpace.pop_back();
			}
		}

		// check for loop keywords (for, while)
		cnt = 0;
		CUtil::CountTally(line, loop_keywords, cnt, 1, exclude, "", "", NULL);
		if (cnt > 0)
		{
			if (loopWhiteSpace.size() < 1)
			{
				// get white space
				tmp = line;
				tmp = CUtil::TrimString(tmp, -1);
				numWS = (unsigned)(line.length() - tmp.length());
			}

			// add nested loop white space and record nested loop level
			for (i = 0; i < cnt; i++)
			{
				loopWhiteSpace.push_back(numWS);

				if ((unsigned int)result->cmplx_nestloop_count.size() < loopWhiteSpace.size())
					result->cmplx_nestloop_count.push_back(1);
				else
					result->cmplx_nestloop_count[loopWhiteSpace.size()-1]++;
			}
		}
	}

	line = CUtil::TrimString(line);
	lineBak = CUtil::TrimString(lineBak);
	size_t line_length = line.length();
	bool lineContinued = false;

	while (i < line_length)
	{
		switch (line[i])
		{
		case '{': case '[': case '(': // parentheses opener
			paren_cnt++;
			break;
		case '}': case ']': case ')': // parentheses closer
			if (paren_cnt > 0)
				paren_cnt--;
			break;
		}

		// 2. if no parentheses enclosing, and if the char is a statement separator
		if (paren_cnt == 0 && (line[i] == ';' || line[i] == ':'))
		{
			tmp = CUtil::ClearRedundantSpaces(line);
			// if line[..i] is else: then exit the outer if
			if (tmp.rfind("else:") != tmp.length() - 5)
			{
				// 3.
				strSize = CUtil::TruncateLine(i + 1 - start, strLSLOC.length(), this->lsloc_truncate, trunc_flag);
				if (strSize > 0)
				{
					strLSLOC += line.substr(start, i);
					strLSLOCBak += lineBak.substr(start, i);
				}
				if (result->addSLOC(strLSLOCBak, lineNumber, trunc_flag))
				{
					// increase logical SLOC here
					result->exec_lines[LOG]++;
				}
				strLSLOC = strLSLOCBak = "";
				start = i + 1;
			}
			else
				lineContinued = true;
		}
		i++;
	}

	if (paren_cnt == 0)
	{
		// add logical SLOC if the line does not end with a continuation string/char
		if (!lineContinued)
		{
			for (i = 0; i < CONT_STR_LENGTH; i++)
			{
				if (continuation_str[i].length() == 1)
				{
					if (line[line_length - 1] == continuation_str[i][0])
					{
						lineContinued = true;
						break;
					}
				}
				else
				{
					idx = CUtil::FindKeyword(line, continuation_str[i]);
					if (idx != string::npos && idx == line_length - continuation_str[i].length() - 1)
					{
						lineContinued = true;
						break;
					}
				}
			}
		}

		if (!lineContinued)
		{
			strSize = CUtil::TruncateLine(line_length - start, strLSLOC.length(), this->lsloc_truncate, trunc_flag);
			if (strSize > 0)
			{
				strLSLOC += line.substr(start, line_length);
				strLSLOCBak += lineBak.substr(start, line_length);
			}
			if (result->addSLOC(strLSLOCBak, lineNumber, trunc_flag))
			{
				// increase logical SLOC here
				result->exec_lines[LOG]++;
			}
			strLSLOC = strLSLOCBak = "";
		}
		else
		{
			tmp = CUtil::TrimString(line.substr(start, line_length - start));
			strSize = CUtil::TruncateLine(tmp.length(), strLSLOC.length(), this->lsloc_truncate, trunc_flag);
			if (strSize > 0)
			{
				strLSLOC += tmp.substr(0, strSize);
				tmp = CUtil::TrimString(lineBak.substr(start, line_length - start));
				strLSLOCBak += tmp.substr(0, strSize);
			}
		}
	}
	result->exec_lines[PHY]++;
}
Code example #22
EStatusCode CFFEmbeddedFontWriter::WriteEncodings(const UIntVector& inSubsetGlyphIDs)
{
	// if it's a CID, don't bother with encodings (mark the position as 0)
	if(mIsCID)
	{
		mEncodingPosition = 0;
		return PDFHummus::eSuccess;
	}

	// not CID, write encoding, according to encoding values from the original font
	EncodingsInfo* encodingInfo = mOpenTypeInput.mCFF.mTopDictIndex[0].mEncoding;
	if(encodingInfo->mEncodingStart <= 1)
	{
		mEncodingPosition = encodingInfo->mEncodingStart;
		return PDFHummus::eSuccess;
	}
	else
	{	
		// the original font had a custom encoding; let's subset it according to just the glyphs we
		// actually have. But because I'm lazy I'll just use the first format.

		// figure out if we got supplements
		UIntVector::const_iterator it = inSubsetGlyphIDs.begin();
		ByteAndUShortList supplements;

		for(; it != inSubsetGlyphIDs.end();++it)
		{
			// don't be confused! the supplements are keyed by SID, not GID!
			unsigned short sid = mOpenTypeInput.mCFF.GetGlyphSID(0,*it);

			UShortToByteList::iterator itSupplements = encodingInfo->mSupplements.find(sid);
			if(itSupplements != encodingInfo->mSupplements.end())
			{
				ByteList::iterator itMoreEncoding = itSupplements->second.begin();
				for(; itMoreEncoding != itSupplements->second.end(); ++itMoreEncoding)
					supplements.push_back(ByteAndUShort(*itMoreEncoding,sid));
			}
		}

		mEncodingPosition = mFontFileStream.GetCurrentPosition();

		if(supplements.size() > 0)
			mPrimitivesWriter.WriteCard8(0x80);
		else
			mPrimitivesWriter.WriteCard8(0);

		// assuming that glyph 0 is in the subset glyph IDs, and it does not require an encoding entry;
		// get the encodings count
		Byte encodingGlyphsCount = std::min((Byte)(inSubsetGlyphIDs.size()-1),encodingInfo->mEncodingsCount); 

		mPrimitivesWriter.WriteCard8(encodingGlyphsCount);
		for(Byte i=0; i < encodingGlyphsCount;++i)
		{
			if(inSubsetGlyphIDs[i+1] < encodingInfo->mEncodingsCount)
				mPrimitivesWriter.WriteCard8(encodingInfo->mEncoding[inSubsetGlyphIDs[i+1]-1]);
			else
				mPrimitivesWriter.WriteCard8(0);
		}

		if(supplements.size() > 0)
		{
			mPrimitivesWriter.WriteCard8(Byte(supplements.size()));
			ByteAndUShortList::iterator itCollectedSupplements = supplements.begin();

			for(; itCollectedSupplements != supplements.end(); ++itCollectedSupplements)
			{
				mPrimitivesWriter.WriteCard8(itCollectedSupplements->first);
				mPrimitivesWriter.WriteCard16(itCollectedSupplements->second);
			}
		}
	}

	return mPrimitivesWriter.GetInternalState();
}