/**
 * Constructs an error from a wide (TCHAR) message.
 * @param num    error code to report through number()
 * @param str    message text; duplicated into internal storage
 * @param ownstr when true, ownership of `str` is taken and it is freed here
 */
CLuceneError::CLuceneError(int num, const TCHAR* str, bool ownstr)
{
    // BUGFIX: store the caller-supplied error code. The previous code
    // hard-wired `error_number = 0`, silently discarding `num` (which was
    // otherwise unused in this constructor).
    error_number = num;
    _awhat = NULL;                 // narrow-char message not provided here
    _twhat = STRDUP_TtoT(str);     // keep a private copy of the message
    // Honor the ownership-transfer contract: free the caller's buffer.
    if ( ownstr )
        _CLDELETE_CARRAY(str);
}
/**
 * Copy constructor: deep-copies the error so this instance owns
 * independent copies of both message buffers.
 */
CLuceneError::CLuceneError(const CLuceneError& clone)
{
    error_number = clone.error_number;
    // Duplicate whichever message variants the source carries; absent
    // variants stay NULL.
    _awhat = (clone._awhat == NULL) ? NULL : STRDUP_AtoA(clone._awhat);
    _twhat = (clone._twhat == NULL) ? NULL : STRDUP_TtoT(clone._twhat);
}
/**
 * Enumerates all terms that are "fuzzily" similar to `term`.
 * @param reader         index to enumerate terms from
 * @param term           the base term; its refcount is incremented here
 * @param minSimilarity  similarity threshold in [0, 1); values outside throw
 * @param _prefixLength  number of leading characters that must match exactly
 * @throws CL_ERR_IllegalArgument when minSimilarity is not in [0, 1)
 */
FuzzyTermEnum::FuzzyTermEnum(IndexReader* reader, Term* term, float_t minSimilarity, size_t _prefixLength):
    FilteredTermEnum(),
    d(NULL), dLen(0),
    _similarity(0), _endEnum(false),
    searchTerm(_CL_POINTER(term)),   // take a reference on the search term
    text(NULL), textLen(0),
    prefix(NULL)/* ISH: was STRDUP_TtoT(LUCENE_BLANK_STRING)*/, prefixLength(0),
    minimumSimilarity(minSimilarity)
{
    CND_PRECONDITION(term != NULL,"term is NULL");

    // Validate the threshold BEFORE computing scale_factor below.
    if (minSimilarity >= 1.0f)
        _CLTHROWA(CL_ERR_IllegalArgument,"minimumSimilarity cannot be greater than or equal to 1");
    else if (minSimilarity < 0.0f)
        _CLTHROWA(CL_ERR_IllegalArgument,"minimumSimilarity cannot be less than 0");

    scale_factor = 1.0f / (1.0f - minimumSimilarity); // only now we are safe from a division by zero
    //TODO: this.field = searchTerm.field();

    //The prefix could be longer than the word.
    //It's kind of silly though. It means we must match the entire word.
    const size_t fullSearchTermLength = searchTerm->textLength();
    // Clamp the requested prefix length to the term's actual length.
    const size_t realPrefixLength = _prefixLength > fullSearchTermLength ? fullSearchTermLength : _prefixLength;

    // `text` is the suffix that participates in the edit-distance check;
    // `prefix` is the exact-match head.
    text = STRDUP_TtoT(searchTerm->text() + realPrefixLength);
    textLen = fullSearchTermLength - realPrefixLength;

    prefix = _CL_NEWARRAY(TCHAR,realPrefixLength+1);
    _tcsncpy(prefix, searchTerm->text(), realPrefixLength);
    prefix[realPrefixLength]='\0';   // _tcsncpy does not terminate; do it here
    prefixLength = realPrefixLength;

    initializeMaxDistances();

    // Seed the underlying enumeration at the first term with our prefix.
    Term* trm = _CLNEW Term(searchTerm->field(), prefix); // _CLNEW Term(term, prefix); -- not intern'd?
    setEnum(reader->terms(trm));
    _CLLDECDELETE(trm);              // release our local reference

    /* LEGACY:
    //Initialize e to NULL
    e = NULL;
    eWidth = 0;
    eHeight = 0;

    if(prefixLength > 0 && prefixLength < textLen){
        this->prefixLength = prefixLength;
        prefix = _CL_NEWARRAY(TCHAR,prefixLength+1);
        _tcsncpy(prefix,text,prefixLength);
        prefix[prefixLength]='\0';
        textLen = prefixLength;
        text[textLen]='\0';
    }
    */
}
QueryParser::QueryParser(const TCHAR* _field, Analyzer* _analyzer)
    : QueryParserBase(_analyzer)
{
    //Func - Constructor.
    //       Instantiates a QueryParser for the named field _field
    //Pre  - _field != NULL
    //Post - An instance has been created

    // Keep a private copy of the default field name (NULL stays NULL).
    field = (_field != NULL) ? STRDUP_TtoT(_field) : NULL;
    tokens = NULL;
    lowercaseExpandedTerms = true;
}
/**
 * Builds a StandardAnalyzer from a Qt string list of stop words.
 * The words are converted to a NULL-terminated TCHAR* array for the
 * CLucene constructor, which copies them into its own stop set.
 * @param stopWords list of stop words to exclude from analysis
 */
QCLuceneStandardAnalyzer::QCLuceneStandardAnalyzer(const QStringList &stopWords)
{
    const TCHAR **tArray = new const TCHAR*[stopWords.count() + 1];
    for (int i = 0; i < stopWords.count(); ++i) {
        TCHAR *stopWord = QStringToTChar(stopWords.at(i));
        tArray[i] = STRDUP_TtoT(stopWord);
        delete [] stopWord;
    }
    tArray[stopWords.count()] = 0;   // NULL terminator expected by CLucene

    d->analyzer = new lucene::analysis::standard::StandardAnalyzer(tArray);

    // BUGFIX: StandardAnalyzer copies the stop words into its own set, so
    // the temporary array and its duplicated strings must be released here
    // — previously both leaked on every construction.
    for (int i = 0; i < stopWords.count(); ++i)
        delete [] tArray[i];
    delete [] tArray;
}
/**
 * Range query over [lowerVal, upperVal] on `fieldName`, scored constantly.
 * NULL bounds denote an open-ended range on that side.
 */
ConstantScoreRangeQuery::ConstantScoreRangeQuery(const TCHAR* _fieldName, const TCHAR* _lowerVal, const TCHAR* _upperVal, bool _includeLower, bool _includeUpper)
    : fieldName(NULL), lowerVal(NULL), upperVal(NULL)
{
    // Work on locals so the normalization reads cleanly.
    const TCHAR* lo = _lowerVal;
    const TCHAR* hi = _upperVal;
    bool incLo = _includeLower;
    bool incHi = _includeUpper;

    // Normalization: open-ended range queries are always inclusive, and an
    // inclusive empty-string lower bound is the same as no lower bound.
    if (lo == NULL) {
        incLo = true;
    } else if (incLo && _tcscmp(lo, _T("")) == 0) {
        lo = NULL;
    }
    if (hi == NULL) {
        incHi = true;
    }

    // Field names are intern'd, just like terms.
    this->fieldName = const_cast<TCHAR*>(CLStringIntern::intern(_fieldName));
    if (lo != NULL)
        this->lowerVal = STRDUP_TtoT(lo);
    if (hi != NULL)
        this->upperVal = STRDUP_TtoT(hi);
    this->includeLower = incLo;
    this->includeUpper = incHi;
}
/**
 * Replaces this token's value and position.
 * @param value token text; copied (or truncated-copied) into storage
 * @param start start offset of the token
 * @param end   end offset; a negative value means "derive from value length"
 * @param type  token type
 */
void QueryToken::set(TCHAR* value, const int32_t start, const int32_t end, const Types type){
#ifndef LUCENE_TOKEN_WORD_LENGTH
    // Dynamic storage: drop any previous value and take a private copy.
    _CLDELETE_CARRAY(Value);
    Value = STRDUP_TtoT(value);
#else
    // Fixed-size storage: copy at most LUCENE_TOKEN_WORD_LENGTH characters.
    _tcsncpy(Value,value,LUCENE_TOKEN_WORD_LENGTH);
    // BUGFIX: _tcsncpy does not NUL-terminate when the source is too long,
    // and the previous line here (`Value[LUCENE_TOKEN_WORD_LENGTH];`) was a
    // no-op expression statement — terminate the buffer explicitly.
    Value[LUCENE_TOKEN_WORD_LENGTH] = '\0';
#endif
    this->Start = start;
    this->End = end;
    this->Type = type;
    // A negative end offset means the caller wants it computed from the text.
    if ( this->End < 0 )
        this->End = _tcslen(Value);
}
/**
 * HTML-encodes `plainText`: escapes ", &, <, > and emits numeric character
 * references for code points >= 128.
 * @param plainText text to encode; NULL or empty yields an empty string
 * @return newly allocated encoded string; caller owns it
 */
TCHAR* SimpleHTMLEncoder::htmlEncode(TCHAR* plainText)
{
    // BUGFIX: test for NULL before measuring — the original called
    // _tcslen(plainText) first, dereferencing a NULL pointer.
    if (plainText == NULL)
        return STRDUP_TtoT(_T(""));

    const size_t plainTextLen = _tcslen(plainText);
    if (plainTextLen == 0)
        return STRDUP_TtoT(_T(""));

    CL_NS(util)::StringBuffer result(plainTextLen);
    // size_t index avoids the signed/unsigned comparison the original had.
    for (size_t index = 0; index < plainTextLen; index++)
    {
        TCHAR ch = plainText[index];
        switch (ch) {
        case '"':
            result.append(_T("&quot;"));
            break;
        case '&':
            result.append(_T("&amp;"));
            break;
        case '<':
            result.append(_T("&lt;"));
            break;
        case '>':
            result.append(_T("&gt;"));
            break;
        default:
            if (ch < 128) {
                result.appendChar(ch);
            } else {
                // Non-ASCII: emit a numeric character reference.
                result.append(_T("&#"));
                result.appendInt(ch);
                result.append(_T(";"));
            }
        }
    }
    return result.toString();
}
void MapFieldSelector::add(const TCHAR* field, FieldSelector::FieldSelectorResult action){ fieldSelections->insert(fieldSelections->end(),std::pair<TCHAR*,FieldSelectorResult>( STRDUP_TtoT(field), action)); }
void PerFieldAnalyzerWrapper::addAnalyzer(const TCHAR* fieldName, Analyzer* analyzer) { analyzerMap.put(STRDUP_TtoT(fieldName), analyzer); }
// Returns a newly allocated name for this scorer; the caller owns it.
TCHAR* toString(){
    const TCHAR* name = _T("ConstantScorer");
    return STRDUP_TtoT(name);
}
/**
 * Copy constructor: re-interns the field name and deep-copies the bounds.
 */
ConstantScoreRangeQuery::ConstantScoreRangeQuery( const ConstantScoreRangeQuery& copy ):
    fieldName(const_cast<TCHAR*>(CLStringIntern::intern(copy.fieldName))),
    // BUGFIX: either bound may be NULL (the main constructor normalizes
    // open-ended ranges to NULL bounds); duplicating NULL dereferenced it.
    lowerVal(copy.lowerVal != NULL ? STRDUP_TtoT(copy.lowerVal) : NULL),
    upperVal(copy.upperVal != NULL ? STRDUP_TtoT(copy.upperVal) : NULL),
    includeLower(copy.includeLower),
    includeUpper(copy.includeUpper)
{
}
// Returns a newly allocated description of this in-memory lock;
// the caller owns the returned string.
TCHAR* RAMDirectory::RAMLock::toString(){
    const TCHAR* desc = _T("LockFile@RAM");
    return STRDUP_TtoT(desc);
}
/** Builds the named analyzer with the given stop words.
 *  The language name is duplicated; stop words are copied into an owning set. */
BRSnowballAnalyzer::BRSnowballAnalyzer(const TCHAR* language, const TCHAR** stopWords)
{
    this->language = STRDUP_TtoT(language);
    // The set owns its strings (true), and is filled from the caller's list.
    stopSet = _CLNEW CLTCSetList(true);
    StopFilter::fillStopTable(stopSet, stopWords);
}
/** Builds the named analyzer with no stop words.
 *  The language name is duplicated; stop-word filtering is disabled. */
BRSnowballAnalyzer::BRSnowballAnalyzer(const TCHAR* language)
{
    this->language = STRDUP_TtoT(language);
    stopSet = NULL;   // NULL stop set == no stop-word filtering
}