// TODO(drott): crbug.com/623940 Fix lack of context sensitive case mapping here. void CaseMappingHarfBuzzBufferFiller::fillSlowCase( CaseMapIntend caseMapIntend, AtomicString locale, const UChar* buffer, unsigned bufferLength, unsigned startIndex, unsigned numCharacters) { // Record pre-context. hb_buffer_add_utf16(m_harfBuzzBuffer, toUint16(buffer), bufferLength, startIndex, 0); for (unsigned charIndex = startIndex; charIndex < startIndex + numCharacters;) { unsigned newCharIndex = charIndex; U16_FWD_1(buffer, newCharIndex, numCharacters); String charByChar(&buffer[charIndex], newCharIndex - charIndex); String caseMappedChar; if (caseMapIntend == CaseMapIntend::UpperCase) caseMappedChar = charByChar.upper(locale); else caseMappedChar = charByChar.lower(locale); for (unsigned j = 0; j < caseMappedChar.length();) { UChar32 codepoint = 0; U16_NEXT(caseMappedChar.characters16(), j, caseMappedChar.length(), codepoint); // Add all characters of the case mapping result at the same cluster position. hb_buffer_add(m_harfBuzzBuffer, codepoint, charIndex); } charIndex = newCharIndex; } // Record post-context hb_buffer_add_utf16(m_harfBuzzBuffer, toUint16(buffer), bufferLength, startIndex + numCharacters, 0); }
NITFPRIV(NITF_BOOL) toUint(nitf_Field * field, NITF_DATA * outData,
                           size_t length, nitf_Error * error)
{
    /* Two storage cases must be handled: a BCS-N field is parsed from its
       string representation, while a NITF_BINARY field is reinterpreted
       directly according to its byte width (2, 4 or 8). */
    NITF_BOOL rv = NITF_FAILURE;

    if (field->type != NITF_BINARY)
    {
        /* Textual (BCS-N) representation: delegate to the string parser. */
        return fromStringToUint(field, outData, length, error);
    }

    switch (field->length)
    {
        case 2:
            rv = toUint16(field, (nitf_Uint16 *) outData, error);
            break;
        case 4:
            rv = toUint32(field, (nitf_Uint32 *) outData, error);
            break;
        case 8:
            rv = toUint64(field, (nitf_Uint64 *) outData, error);
            break;
        default:
            /* Any other binary width is unsupported; rv stays NITF_FAILURE. */
            nitf_Error_initf(error, NITF_CTXT, NITF_ERR_INVALID_PARAMETER,
                             "Unexpected field size for uint [%d]",
                             field->length);
    }
    return rv;
}
// Appends the run text to the HarfBuzz buffer, uppercasing it first when the
// font requests synthesized small caps.
static inline void addToHarfBuzzBufferInternal(hb_buffer_t* buffer,
    const FontDescription& fontDescription,
    const UChar* normalizedBuffer,
    unsigned normalizedBufferLength,
    unsigned startIndex,
    unsigned numCharacters)
{
    // TODO: Revisit whether we can always fill the hb_buffer_t with the
    // full run text, but only specify startIndex and numCharacters for the part
    // to be shaped. Then simplify/change the complicated index computations in
    // extractShapeResults().
    if (fontDescription.variant() != FontVariantSmallCaps) {
        hb_buffer_add_utf16(buffer, toUint16(normalizedBuffer),
                            normalizedBufferLength, startIndex, numCharacters);
        return;
    }
    String uppercased =
        String(normalizedBuffer, normalizedBufferLength).upper();
    // TextRun is 16 bit, therefore the uppercased copy is 16 bit as well,
    // even after the case conversion.
    ASSERT(!uppercased.is8Bit());
    hb_buffer_add_utf16(buffer, toUint16(uppercased.characters16()),
                        normalizedBufferLength, startIndex, numCharacters);
}
// Fills |harfBuzzBuffer| with |buffer|'s text, applying the requested case
// mapping. Falls back to fillSlowCase() when the mapping changes the UTF-16
// length, since then the simple one-shot copy can no longer preserve cluster
// indices.
CaseMappingHarfBuzzBufferFiller::CaseMappingHarfBuzzBufferFiller(
    CaseMapIntend caseMapIntend,
    AtomicString locale,
    hb_buffer_t* harfBuzzBuffer,
    const UChar* buffer,
    unsigned bufferLength,
    unsigned startIndex,
    unsigned numCharacters)
    : m_harfBuzzBuffer(harfBuzzBuffer)
{
    if (caseMapIntend == CaseMapIntend::KeepSameCase) {
        // No mapping requested: hand the original text over unchanged.
        hb_buffer_add_utf16(m_harfBuzzBuffer, toUint16(buffer), bufferLength,
                            startIndex, numCharacters);
        return;
    }

    String sourceText(buffer, bufferLength);
    String caseMappedText = (caseMapIntend == CaseMapIntend::UpperCase)
        ? sourceText.upper(locale)
        : sourceText.lower(locale);

    if (caseMappedText.length() != bufferLength) {
        // The mapping changed the length; map code point by code point.
        fillSlowCase(caseMapIntend, locale, buffer, bufferLength, startIndex,
                     numCharacters);
        return;
    }

    ASSERT(caseMappedText.length() == bufferLength);
    ASSERT(!caseMappedText.is8Bit());
    hb_buffer_add_utf16(m_harfBuzzBuffer,
                        toUint16(caseMappedText.characters16()),
                        bufferLength, startIndex, numCharacters);
}
// Scans the raw GPOS table for feature records tagged 'kern' and collects the
// (deduplicated) lookup-list indices they reference.
// NOTE(review): this definition appears truncated here — the body that
// presumably consumes LookupListIndex/LookupList_Offset is not visible, and
// the function is not closed in this view.
void KernFeature::makeCoverage()
{
	if ( GPOSTableRaw.isEmpty() )
		return;

	// GPOS header: offset of the FeatureList at byte 6, LookupList at byte 8.
	quint16 FeatureList_Offset= toUint16 ( 6 );
	quint16 LookupList_Offset = toUint16 ( 8 );

	// Find the offsets of the kern feature tables.
	quint16 FeatureCount = toUint16 ( FeatureList_Offset );;
	QList<quint16> FeatureKern_Offset;
	for ( quint16 FeatureRecord ( 0 ); FeatureRecord < FeatureCount; ++ FeatureRecord )
	{
		// Each FeatureRecord is 6 bytes: 4-byte tag + 2-byte offset.
		int rawIdx ( FeatureList_Offset + 2 + ( 6 * FeatureRecord ) );
		quint32 tag ( FT_MAKE_TAG ( GPOSTableRaw.at ( rawIdx ),
		                            GPOSTableRaw.at ( rawIdx + 1 ),
		                            GPOSTableRaw.at ( rawIdx + 2 ),
		                            GPOSTableRaw.at ( rawIdx + 3 ) ) );
		if ( tag == TTAG_kern )
		{
			// Feature table offsets are relative to the FeatureList.
			FeatureKern_Offset << ( toUint16 ( rawIdx + 4 ) + FeatureList_Offset );
		}
	}

	// Extract indices of lookups for feature kern.
	QList<quint16> LookupListIndex;
	foreach ( quint16 kern, FeatureKern_Offset )
	{
		// Feature table: LookupIndexCount at byte 2, indices follow.
		quint16 LookupCount ( toUint16 ( kern + 2 ) );
		for ( int llio ( 0 ) ; llio < LookupCount; ++llio )
		{
			quint16 Idx ( toUint16 ( kern + 4 + ( llio * 2 ) ) );
			if ( !LookupListIndex.contains ( Idx ) )
			{
				LookupListIndex <<Idx ;
			}
		}
	}
// investigated, instead of being silently accepted. if (getSciVersion() <= SCI_VERSION_0_LATE && (toSint16() < 0 || right.toSint16() < 0)) warning("Modulo of a negative number has been requested for SCI0. This *could* lead to issues"); int16 value = toSint16(); int16 modulo = ABS(right.toSint16()); int16 result = value % modulo; if (result < 0) result += modulo; return make_reg(0, result); } else return lookForWorkaround(right, "modulo"); } reg_t reg_t::operator>>(const reg_t right) const { if (isNumber() && right.isNumber()) return make_reg(0, toUint16() >> right.toUint16()); else return lookForWorkaround(right, "shift right"); } reg_t reg_t::operator<<(const reg_t right) const { if (isNumber() && right.isNumber()) return make_reg(0, toUint16() << right.toUint16()); else return lookForWorkaround(right, "shift left"); } reg_t reg_t::operator+(int16 right) const { return *this + make_reg(0, right); }