FieldCacheAuto::~FieldCacheAuto(){
	// Free whichever payload this cache entry holds, selected by contentType.
	// Exactly one branch applies; each releases its storage and returns.
	if ( contentType == FieldCacheAuto::INT_ARRAY ){
		_CLDELETE_ARRAY(intArray);
		return;
	}
	if ( contentType == FieldCacheAuto::FLOAT_ARRAY ){
		_CLDELETE_ARRAY(floatArray);
		return;
	}
	if ( contentType == FieldCacheAuto::STRING_INDEX ){
		_CLDELETE(stringIndex);
		return;
	}
	if ( contentType == FieldCacheAuto::STRING_ARRAY ){
		// Delete the contained strings only when this cache owns them.
		if ( ownContents ){
			for ( int32_t idx = 0; idx < contentLen; idx++ )
				_CLDELETE_CARRAY(stringArray[idx]);
		}
		_CLDELETE_ARRAY(stringArray);
		return;
	}
	if ( contentType == FieldCacheAuto::COMPARABLE_ARRAY ){
		// Same ownership rule as STRING_ARRAY, but elements are objects.
		if ( ownContents ){
			for ( int32_t idx = 0; idx < contentLen; idx++ )
				_CLDELETE(comparableArray[idx]);
		}
		_CLDELETE_ARRAY(comparableArray);
		return;
	}
	if ( contentType == FieldCacheAuto::SORT_COMPARATOR ){
		_CLDELETE(sortComparator);
		return;
	}
	if ( contentType == FieldCacheAuto::SCOREDOC_COMPARATOR ){
		_CLDELETE(scoreDocComparator);
	}
}
TCHAR* ChainedFilter::toString() {
	// Render as "ChainedFilter: [LOGIC filter LOGIC filter ...]".
	// `filters` is a NULL-terminated array; `logicArray` supplies a per-filter
	// logic op when the overall `logic` is -1.
	StringBuffer buf(_T("ChainedFilter: ["));
	int* perFilterLogic = logicArray;
	for ( Filter** current = filters; *current != NULL; ++current ) {
		if ( current != filters )
			buf.appendChar(' ');
		buf.append(getLogicString(logic == -1 ? *perFilterLogic : logic));
		buf.appendChar(' ');
		// toString() returns a heap string we must free after appending.
		TCHAR* filterText = (*current)->toString();
		buf.append(filterText);
		_CLDELETE_ARRAY( filterText );
		if ( logic == -1 )
			++perFilterLogic;
	}
	buf.appendChar(']');
	return buf.toString();
}
//static Query* Query::mergeBooleanQueries(Query** queries) { CL_NS(util)::CLVector<BooleanClause*> allClauses; int32_t i = 0; while ( queries[i] != NULL ){ BooleanQuery* bq = (BooleanQuery*)queries[i]; int32_t size = bq->getClauseCount(); BooleanClause** clauses = _CL_NEWARRAY(BooleanClause*, size); bq->getClauses(clauses); for (int32_t j = 0;j<size;++j ){ allClauses.push_back(clauses[j]); j++; } _CLDELETE_ARRAY(clauses); i++; } BooleanQuery* result = _CLNEW BooleanQuery(); CL_NS(util)::CLVector<BooleanClause*>::iterator itr = allClauses.begin(); while (itr != allClauses.end() ) { result->add(*itr); } return result; }
void Term::set(const TCHAR* fld, const TCHAR* txt,bool internField) {
//Func - Resets this Term to field `fld` and text `txt`, reusing the existing
//       text buffer where possible.
//Pre  - fld and txt are non-NULL
//Post - _field/_text hold the new values; the previously interned field (if
//       any) has been uninterned; the cached hash code is invalidated.
	CND_PRECONDITION(fld != NULL, "fld contains NULL");
	CND_PRECONDITION(txt != NULL, "txt contains NULL");

	//save field for unintern later
	const TCHAR* oldField = _field;
	cachedHashCode = 0; // invalidate; recomputed lazily on next use
	textLen = _tcslen(txt);

	//Delete text if it is the owner
#ifdef LUCENE_TERM_TEXT_LENGTH
	// Fixed-capacity build: clamp and copy into the in-object buffer
	// (assumes _text holds LUCENE_TERM_TEXT_LENGTH+1 TCHARs — TODO confirm).
	if (textLen > LUCENE_TERM_TEXT_LENGTH)
		textLen = LUCENE_TERM_TEXT_LENGTH;
	_tcsncpy(_text,txt,textLen+1);
	_text[textLen]=0;
#else
	//if the term text buffer is bigger than what we have
	if (_text && textLen > textLenBuf) {
		// Current buffer is too small: free it, unless it is the shared
		// LUCENE_BLANK_STRING sentinel, which is never owned by a Term.
		if (_text != LUCENE_BLANK_STRING) {
			_CLDELETE_ARRAY(_text);
		} else {
			_text = NULL;
		}
		textLenBuf = 0;
	}

	if (_text == LUCENE_BLANK_STRING) {
		// Still pointing at the blank sentinel; keep it as-is
		// (presumably txt fits this case — TODO confirm invariant).
		_text = LUCENE_BLANK_STRING;
	} else if (_text == NULL) {
		if (txt[0] == 0) {
			//if the string is blank and we aren't re-using the buffer...
			_text = LUCENE_BLANK_STRING;
		} else {
			//duplicate the text
			_text = stringDuplicate(txt);
			textLenBuf = textLen;
		}
	} else {
		//re-use the buffer (guaranteed big enough by the check above)
		_tcscpy(_text,txt);
	}
#endif

	//Set Term Field
	if (internField) {
		_field = CLStringIntern::intern(fld CL_FILELINE);
	} else {
		_field = fld;
	}

	//unintern old field after interning new one,
	if (internF)
		CLStringIntern::unintern(oldField);
	internF = internField;

	CND_PRECONDITION(_tcscmp(fld, _field) == 0, "field not equal");
}
Sort::Sort() {
	// Default sort: by relevance score, then by document number.
	// Initialise the member before setSort() touches it.
	fields = NULL;
	// FIX: the temporary array used to be declared as `fields`, shadowing the
	// member of the same name — legal but error-prone; use a distinct name.
	SortField** defaultFields = _CL_NEWARRAY(SortField*, 3);
	defaultFields[0] = SortField::FIELD_SCORE;
	defaultFields[1] = SortField::FIELD_DOC;
	defaultFields[2] = NULL;
	setSort(defaultFields);
	// setSort() copies what it needs, so the temporary array is freed here
	// (matching the original code's behaviour).
	_CLDELETE_ARRAY(defaultFields);
}
QueryScorer::QueryScorer(const Query * query):
	_uniqueTermsInFragment(true),
	_termsToFind(false,true)
{
	// Pull the weighted terms out of the query, hand them to initialize(),
	// then release the temporary array itself.
	WeightedTerm** extractedTerms = QueryTermExtractor::getTerms(query);
	initialize(extractedTerms);
	_CLDELETE_ARRAY(extractedTerms);
}
QCLuceneMultiSearcher::QCLuceneMultiSearcher(const QList<QCLuceneSearchable> searchables)
    : QCLuceneSearcher()
{
    // BUGFIX: the array was previously passed to MultiSearcher completely
    // uninitialized (and without a terminator slot), so the callee iterated
    // over indeterminate pointers. Allocate one extra slot and zero every
    // entry so `list` is at least a valid, NULL-terminated array.
    lucene::search::Searchable** list =
        _CL_NEWARRAY(lucene::search::Searchable*, searchables.count() + 1);
    for (int i = 0; i < searchables.count() + 1; ++i)
        list[i] = 0;
    // TODO(review): the elements of `searchables` are still never copied into
    // `list`; confirm how QCLuceneSearchable exposes its underlying
    // lucene::search::Searchable and populate the array accordingly.
    d->searchable = new lucene::search::MultiSearcher(list);
    _CLDELETE_ARRAY(list);
}
int32_t SegmentMerger::mergeFields() {
//Func - Merge the fields of all segments
//Pre  - true
//Post - The field infos and field values of all segments have been merged.

	//Create a new FieldInfos
	fieldInfos = _CLNEW FieldInfos(); // merge field names

	//Condition check to see if fieldInfos points to a valid instance
	CND_CONDITION(fieldInfos != NULL, "Memory allocation for fieldInfos failed");

	IndexReader* reader = NULL;
	int32_t docCount = 0;

	//Iterate through all readers
	for (uint32_t i = 0; i < readers.size(); i++) {
		//get the i-th reader
		reader = readers[i];

		//Condition check to see if reader points to a valid instance
		CND_CONDITION(reader != NULL,"No IndexReader found");

		// Register each reader's fields with fieldInfos, querying one
		// term-vector flavour at a time (most specific first).
		StringArrayWithDeletor tmp;

		tmp.clear();
		reader->getFieldNames(IndexReader::TERMVECTOR_WITH_POSITION_OFFSET, tmp);
		addIndexed(reader, fieldInfos, tmp, true, true, true);

		tmp.clear();
		reader->getFieldNames(IndexReader::TERMVECTOR_WITH_POSITION, tmp);
		addIndexed(reader, fieldInfos, tmp, true, true, false);

		tmp.clear();
		reader->getFieldNames(IndexReader::TERMVECTOR_WITH_OFFSET, tmp);
		addIndexed(reader, fieldInfos, tmp, true, false, true);

		tmp.clear();
		reader->getFieldNames(IndexReader::TERMVECTOR, tmp);
		addIndexed(reader, fieldInfos, tmp, true, false, false);

		tmp.clear();
		reader->getFieldNames(IndexReader::INDEXED, tmp);
		addIndexed(reader, fieldInfos, tmp, false, false, false);

		tmp.clear();
		reader->getFieldNames(IndexReader::UNINDEXED, tmp);
		if (tmp.size() > 0) {
			// Unindexed fields are added in one bulk call via a temporary
			// array (size+1 slots, presumably NULL-terminated by toArray —
			// TODO confirm against StringArrayWithDeletor).
			TCHAR** arr = _CL_NEWARRAY(TCHAR*,tmp.size()+1);
			tmp.toArray(arr);
			fieldInfos->add((const TCHAR**)arr, false);
			_CLDELETE_ARRAY(arr); //no need to delete the contents, since tmp is responsible for it
		}
	}
	// NOTE(review): this chunk appears truncated — `docCount` is declared but
	// never returned here; the remainder of mergeFields() (writing fieldInfos
	// and merging the stored field values) is presumably outside this view.
void testEndThreadException(CuTest *tc) {
	// Regression test: deleting an IndexSearcher that a worker thread
	// (searchDocs) is still associated with. Builds a MAX_DOCS-document RAM
	// index, then exercises two delete/join orderings.
	const int MAX_DOCS=1500;
	RAMDirectory ram;
	WhitespaceAnalyzer an;
	IndexWriter* writer = _CLNEW IndexWriter(&ram, &an, true);

	// add some documents
	Document doc;
	for (int i = 0; i < MAX_DOCS; i++) {
		TCHAR * tmp = English::IntToEnglish(i);
		doc.add(* new Field(_T("content"), tmp, Field::STORE_YES | Field::INDEX_UNTOKENIZED));
		writer->addDocument(&doc);
		doc.clear();
		_CLDELETE_ARRAY( tmp );
	}
	CuAssertEquals(tc, MAX_DOCS, writer->docCount());
	writer->close();
	_CLLDELETE(writer);

	// this sequence is OK: delete searcher after search thread finish
	{
		IndexSearcher * searcher = _CLNEW IndexSearcher(&ram);
		_LUCENE_THREADID_TYPE thread = _LUCENE_THREAD_CREATE(&searchDocs, searcher);
		SCOPED_LOCK_MUTEX(searchMutex);
		// block until the worker signals it has started searching
		CONDITION_WAIT(searchMutex, searchCondition);
		// _LUCENE_SLEEP(9999); //make sure that deleteMutex is being waited on...
		CONDITION_NOTIFYALL(deleteCondition);
		_LUCENE_THREAD_JOIN(thread);
		searcher->close();
		_CLLDELETE(searcher);
	}

	// this produces memory exception: delete searcher after search finish but before thread finish
	{
		IndexSearcher * searcher = _CLNEW IndexSearcher(&ram);
		_LUCENE_THREADID_TYPE thread = _LUCENE_THREAD_CREATE(&searchDocs, searcher);
		SCOPED_LOCK_MUTEX(searchMutex);
		CONDITION_WAIT(searchMutex, searchCondition);
		// NOTE: searcher is closed and freed before the worker is released
		// and joined — this ordering is what reproduces the crash.
		searcher->close();
		_CLLDELETE(searcher);
		CONDITION_NOTIFYALL(deleteCondition);
		_LUCENE_THREAD_JOIN(thread);
	}
	ram.close();
}
MultiReader::~MultiReader() {
	// Destructor: release the per-segment bookkeeping arrays and, when this
	// reader owns its sub-readers, destroy each of those as well.
	_CLDELETE_ARRAY(ones);
	_CLDELETE_ARRAY(starts);

	if (!_own)
		return;

	// Destroy every managed sub-reader, then the array holding them.
	if (subReaders != NULL && subReadersLength > 0) {
		for (int32_t idx = 0; idx < subReadersLength; ++idx)
			_CLDELETE(subReaders[idx]);
	}
	_CLDELETE_ARRAY(subReaders);
}
TCHAR* Document::toString() const {
	// Render the document as "Document<field1 field2 ...>" using each
	// field's own toString(). Caller owns the returned string.
	StringBuffer result(_T("Document<"));
	for (DocumentFieldEnumeration::DocumentFieldList* node = fieldList;
	     node != NULL; node = node->next) {
		TCHAR* fieldText = node->field->toString();
		result.append(fieldText);
		_CLDELETE_ARRAY( fieldText );
		// single-space separator between fields, none after the last
		if (node->next != NULL)
			result.append(_T(" "));
	}
	result.append(_T(">"));
	return result.toString();
}
void Sort::clear(){ if ( fields != NULL ){ int32_t i=0; while ( fields[i] != NULL ){ if ( fields[i] != SortField::FIELD_SCORE && fields[i] != SortField::FIELD_DOC ){ _CLDELETE(fields[i]); } i++; } _CLDELETE_ARRAY(fields); } }
void testRAMDirectorySize(CuTest * tc) {
	// Verify MockRAMDirectory's cached sizeInBytes stays consistent with a
	// full recomputation while numThreads workers (indexDocs) add documents
	// concurrently through a shared writer.
	MockRAMDirectory * ramDir = _CLNEW MockRAMDirectory(indexDir);
	WhitespaceAnalyzer analyzer;
	IndexWriter * writer = _CLNEW IndexWriter(ramDir, &analyzer, false);
	writer->optimize();

	CuAssertTrue(tc, ramDir->sizeInBytes == ramDir->getRecomputedSizeInBytes(), _T("RAMDir size"));

	// spawn the workers, each with its own ThreadData slot
	_LUCENE_THREADID_TYPE* threads = _CL_NEWARRAY(_LUCENE_THREADID_TYPE, numThreads);
	ThreadData * tdata = _CL_NEWARRAY(ThreadData, numThreads);
	for (int i=0; i<numThreads; i++) {
		tdata[i].num = i;
		tdata[i].dir = ramDir;
		tdata[i].tc = tc;
		tdata[i].writer = writer;
		threads[i] = _LUCENE_THREAD_CREATE(&indexDocs, &tdata[i]);
	}
	for (int i=0; i<numThreads; i++) {
		_LUCENE_THREAD_JOIN(threads[i]);
	}
	_CLDELETE_ARRAY(threads);
	_CLDELETE_ARRAY(tdata);

	writer->optimize();
	CuAssertTrue(tc, ramDir->sizeInBytes == ramDir->getRecomputedSizeInBytes(), _T("RAMDir size"));
	// docsPerThread-1 because each worker's loop starts at j=1 (see indexDocs)
	CuAssertEquals(tc, docsToAdd + (numThreads * (docsPerThread-1)), writer->docCount(), _T("document count"));

	writer->close();
	_CLLDELETE(writer);
	ramDir->close();
	_CLLDELETE(ramDir);
}
void QueryTermExtractor::getTermsFromBooleanQuery(const BooleanQuery * query, WeightedTermList * terms, bool prohibited, const TCHAR* fieldName) { uint32_t numClauses = query->getClauseCount(); BooleanClause** queryClauses = _CL_NEWARRAY(BooleanClause*,numClauses); query->getClauses(queryClauses); for (uint32_t i = 0; i < numClauses; i++) { if (prohibited || !queryClauses[i]->prohibited){ Query* qry = queryClauses[i]->getQuery(); getTerms(qry, terms, prohibited, fieldName); } } _CLDELETE_ARRAY(queryClauses); }
SegmentReader::Norm::~Norm() {
	// Destructor: free the norms byte array, and free the IndexInput unless
	// it is the reader's shared singleNormStream (owned elsewhere). Deleting
	// the IndexInput also closes it via its own destructor.
	_CLDELETE_ARRAY(bytes);
	if ( in != _this->singleNormStream )
		_CLDELETE(in);
}
_LUCENE_THREAD_FUNC(indexDocs, _data) {
	// Worker thread body for testRAMDirectorySize: adds docsPerThread-1
	// documents (j starts at 1) through the shared writer, checking the
	// directory's size bookkeeping after each add.
	ThreadData * data = (ThreadData *)_data;
	int cnt = 0; // NOTE(review): never used in this view
	TCHAR * text;
	for (int j=1; j<docsPerThread; j++) {
		Document doc;
		// unique text per (thread, iteration) pair
		text = English::IntToEnglish(data->num*docsPerThread+j);
		doc.add(*new Field(_T("sizeContent"), text, Field::STORE_YES | Field::INDEX_UNTOKENIZED));
		data->writer->addDocument(&doc);
		_CLDELETE_ARRAY(text);
		{
			// the size comparison must hold the directory lock so a
			// concurrent writer cannot change sizeInBytes mid-check
			SCOPED_LOCK_MUTEX(data->dir->THIS_LOCK);
			CuAssertTrue(data->tc, data->dir->sizeInBytes == data->dir->getRecomputedSizeInBytes());
		}
	}
	_LUCENE_THREAD_FUNC_RETURN( 0 );
}
void SegmentMergeInfo::close() {
//Func - Closes the resources
//Pre  - true
//Post - The resources have been closed

	//First make sure posting has been closed
	if ( postings != NULL ){
		postings->close();
		_CLVDELETE(postings); //todo: not a clucene object... should be
	}
	if ( termEnum != NULL ){
		termEnum->close();
		_CLDELETE(termEnum);
	}
	// term is presumably reference-counted: _CLDECDELETE decrements and
	// deletes on the last reference — verify against CLucene's macros.
	_CLDECDELETE(term);
	_CLDELETE_ARRAY(docMap);
}
// setup the index void testRAMDirectorySetUp (CuTest *tc) { if (strlen(cl_tempDir) + 13 > CL_MAX_PATH) CuFail(tc, _T("Not enough space in indexDir buffer")); sprintf(indexDir, "%s/RAMDirIndex", cl_tempDir); WhitespaceAnalyzer analyzer; IndexWriter * writer = new IndexWriter(indexDir, &analyzer, true); // add some documents TCHAR * text; for (int i = 0; i < docsToAdd; i++) { Document doc; text = English::IntToEnglish(i); doc.add(* new Field(_T("content"), text, Field::STORE_YES | Field::INDEX_UNTOKENIZED)); writer->addDocument(&doc); _CLDELETE_ARRAY(text); } CuAssertEquals(tc, docsToAdd, writer->docCount(), _T("document count")); writer->close(); _CLDELETE( writer ); }
/* Run one indexer and 2 searchers against single index as stress test. */
void runThreadingTests(CuTest* tc, Directory& directory){
	SimpleAnalyzer ANALYZER;
	IndexWriter writer(&directory, &ANALYZER, true);

	// Establish a base index of 100 docs:
	StringBuffer sb;
	TCHAR buf[10];
	for(int i=0;i<100;i++) {
		Document d;
		_i64tot(i,buf,10); // doc id rendered as a decimal string
		d.add(*_CLNEW Field(_T("id"), buf, Field::STORE_YES | Field::INDEX_UNTOKENIZED));
		sb.clear();
		English::IntToEnglish(i, &sb);
		d.add(*_CLNEW Field(_T("contents"), sb.getBuffer(), Field::STORE_NO | Field::INDEX_TOKENIZED));
		writer.addDocument(&d);
	}
	writer.flush();

	//read using multiple threads...
	// two concurrent indexers share the writer, two searchers hit the
	// directory at the same time
	atomicSearchThreads = _CL_NEWARRAY(_LUCENE_THREADID_TYPE, 4);
	atomicSearchThreads[0] = _LUCENE_THREAD_CREATE(&atomicIndexTest, &writer);
	atomicSearchThreads[1] = _LUCENE_THREAD_CREATE(&atomicIndexTest, &writer);
	atomicSearchThreads[2] = _LUCENE_THREAD_CREATE(&atomicSearchTest, &directory);
	atomicSearchThreads[3] = _LUCENE_THREAD_CREATE(&atomicSearchTest, &directory);
	for ( int i=0;i<4;i++ ){
		_LUCENE_THREAD_JOIN(atomicSearchThreads[i]);
	}
	_CLDELETE_ARRAY(atomicSearchThreads);
	writer.close();
	// worker threads set atomicSearchFailed when they catch an exception
	CuAssert(tc, _T("hit unexpected exception in one of the threads\n"), !atomicSearchFailed);
}
~Internal(){
	// Release the two helper arrays owned by this Internal instance.
	_CLDELETE_ARRAY(closeOnClose);
	_CLDELETE_ARRAY(ones);
}
int32_t SegmentMerger::mergeFields() {
//Func - Merge the fields of all segments
//Pre  - true
//Post - The field infos and field values of all segments have been merged.

	if (!mergeDocStores) {
		// When we are not merging by doc stores, that means
		// all segments were written as part of a single
		// autoCommit=false IndexWriter session, so their field
		// name -> number mapping are the same. So, we start
		// with the fieldInfos of the last segment in this
		// case, to keep that numbering.
		assert(readers[readers.size()-1]->instanceOf(SegmentReader::getClassName()));
		assert(false);//check last...and remove if correct...
		SegmentReader* sr = (SegmentReader*)readers[readers.size()-1];
		fieldInfos = sr->fieldInfos()->clone();
	} else {
		//Create a new FieldInfos
		fieldInfos = _CLNEW FieldInfos(); // merge field names
	}

	//Condition check to see if fieldInfos points to a valid instance
	CND_CONDITION(fieldInfos != NULL,"Memory allocation for fieldInfos failed");

	IndexReader* reader = NULL;

	//Iterate through all readers
	for (uint32_t i = 0; i < readers.size(); i++){
		//get the i-th reader
		reader = readers[i];

		//Condition check to see if reader points to a valid instance
		CND_CONDITION(reader != NULL,"No IndexReader found");

		if (reader->instanceOf(SegmentReader::getClassName())) {
			// Fast path: copy the field infos straight out of the segment
			// reader, carrying each flag across individually.
			SegmentReader* segmentReader = (SegmentReader*) reader;
			for (size_t j = 0; j < segmentReader->getFieldInfos()->size(); j++) {
				FieldInfo* fi = segmentReader->getFieldInfos()->fieldInfo(j);
				fieldInfos->add(fi->name, fi->isIndexed, fi->storeTermVector,
					fi->storePositionWithTermVector, fi->storeOffsetWithTermVector,
					!reader->hasNorms(fi->name), fi->storePayloads);
			}
		} else {
			// Generic path: query the reader for each field category,
			// most specific term-vector flavour first.
			StringArrayWithDeletor tmp;

			tmp.clear();
			reader->getFieldNames(IndexReader::TERMVECTOR_WITH_POSITION_OFFSET, tmp);
			addIndexed(reader, fieldInfos, tmp, true, true, true, false);

			tmp.clear();
			reader->getFieldNames(IndexReader::TERMVECTOR_WITH_POSITION, tmp);
			addIndexed(reader, fieldInfos, tmp, true, true, false, false);

			tmp.clear();
			reader->getFieldNames(IndexReader::TERMVECTOR_WITH_OFFSET, tmp);
			addIndexed(reader, fieldInfos, tmp, true, false, true, false);

			tmp.clear();
			reader->getFieldNames(IndexReader::TERMVECTOR, tmp);
			addIndexed(reader, fieldInfos, tmp, true, false, false, false);

			tmp.clear();
			reader->getFieldNames(IndexReader::STORES_PAYLOADS, tmp);
			addIndexed(reader, fieldInfos, tmp, false, false, false, true);

			tmp.clear();
			reader->getFieldNames(IndexReader::INDEXED, tmp);
			addIndexed(reader, fieldInfos, tmp, false, false, false, false);

			tmp.clear();
			reader->getFieldNames(IndexReader::UNINDEXED, tmp);
			if ( tmp.size() > 0 ){
				// Unindexed fields are added in one bulk call via a
				// temporary NULL-terminated array.
				TCHAR** arr = _CL_NEWARRAY(TCHAR*,tmp.size()+1);
				tmp.toArray_nullTerminated(arr);
				fieldInfos->add((const TCHAR**)arr, false);
				_CLDELETE_ARRAY(arr); //no need to delete the contents, since tmp is responsible for it
			}
		}
	// NOTE(review): this chunk ends mid-function — the reader loop and the
	// function body are never closed and no int32_t is returned; the
	// remainder of mergeFields() is presumably outside this view.
MultiSearcher::~MultiSearcher() {
	// Free the bookkeeping arrays only; the individual Searchable objects
	// they referenced are not deleted by this destructor.
	_CLDELETE_ARRAY(starts);
	_CLDELETE_ARRAY(searchables);
}
void BufferedIndexInput::close(){
	// Drop the read buffer and reset all buffer bookkeeping to zero.
	_CLDELETE_ARRAY(buffer);
	bufferStart = 0;
	bufferPosition = 0;
	bufferLength = 0;
}
DefaultSkipListWriter::~DefaultSkipListWriter(){
	// Free the per-level arrays tracking the last written skip entry.
	_CLDELETE_ARRAY(lastSkipProxPointer);
	_CLDELETE_ARRAY(lastSkipFreqPointer);
	_CLDELETE_ARRAY(lastSkipPayloadLength);
	_CLDELETE_ARRAY(lastSkipDoc);
}
FSLock::~FSLock() {
	// Free the lock file path (TCHAR array) and the lock directory path.
	// _CLDELETE_LCaARRAY is presumably the variant for a (const) char array
	// — verify against CLucene's memory-management macros.
	_CLDELETE_ARRAY( lockFile );
	_CLDELETE_LCaARRAY( lockDir );
}