Example #1
void FoxPro::loadHeader(SeekableReadStream &dbf, uint32 &recordSize, uint32 &recordCount,
                        uint32 &firstRecordPos) {

	byte version = dbf.readByte();
	if (version != 0xF5)
		throw Exception("Unknown database version 0x%02X", version);

	int lastUpdateYear  = dbf.readByte() + 2000;
	int lastUpdateMonth = dbf.readByte();
	int lastUpdateDay   = dbf.readByte();

	_lastUpdate = date(lastUpdateYear, lastUpdateMonth, lastUpdateDay);

	recordCount    = dbf.readUint32LE();
	firstRecordPos = dbf.readUint16LE();
	recordSize     = dbf.readUint16LE();

	dbf.skip(16); // Reserved

	byte flags = dbf.readByte();

	_hasIndex = (flags & 0x01) != 0;
	_hasMemo  = (flags & 0x02) != 0;

	if (flags & 0x04)
		throw Exception("DBC unsupported");
	if (flags & 0xF8)
		throw Exception("Unknown flags 0x%02X", flags);

	dbf.skip(1); // Codepage marker
	dbf.skip(2); // Reserved
}
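For reference, the header layout implied by the reads in loadHeader() above, with byte offsets derived from the sizes of the individual reads (the field names are descriptive only and not taken from a format specification):

// FoxPro .dbf header as read by loadHeader(), 32 bytes (0x20) in total:
//   0x00   1  version            (expected to be 0xF5)
//   0x01   3  last update        (year byte added to 2000, month, day)
//   0x04   4  record count       (uint32, little-endian)
//   0x08   2  first record pos   (uint16, little-endian)
//   0x0A   2  record size        (uint16, little-endian)
//   0x0C  16  reserved
//   0x1C   1  flags              (0x01 index, 0x02 memo, 0x04 DBC)
//   0x1D   1  codepage marker
//   0x1E   2  reserved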
Example #2
void StreamTokenizer::nextChunk(SeekableReadStream &stream) {
	skipChunk(stream);

	byte c = stream.readByte();

	if (stream.eos() || stream.err())
		return;

	if (!isIn(c, _chunkEnds))
		stream.seek(-1, SEEK_CUR);
	else if (stream.pos() == stream.size())
		// This is actually the last character; read one more byte to properly set the EOS state
		stream.readByte();
}
Example #3
void FoxPro::loadRecords(SeekableReadStream &dbf, uint32 recordSize, uint32 recordCount) {
	// Read the raw data of all records into one pooled buffer
	_pool.push_back(new byte[recordSize * recordCount]);
	byte *recordData = _pool.back();

	if (dbf.read(recordData, recordSize * recordCount) != (recordSize * recordCount))
		throw Exception(kReadError);

	if (dbf.readByte() != 0x1A)
		throw Exception("Record end marker missing");

	uint32 fieldCount = _fields.size();

	// Create the records array
	_records.resize(recordCount);
	for (uint32 i = 0; i < recordCount; i++) {
		Record &record = _records[i];
		const byte *data = recordData + i * recordSize;

		char status = *data++;
		if ((status != ' ') && (status != '*'))
			throw Exception("Unknown record status '%c'", status);

		record.deleted = status == '*';

		record.fields.resize(fieldCount);
		for (uint32 j = 0; j < fieldCount; j++) {
			record.fields[j] = data;
			data += _fields[j].size;
		}
	}
}
Example #4
String NEResources::getResourceString(SeekableReadStream &exe, uint32 offset) {
	uint32 curPos = exe.pos();

	if (!exe.seek(offset)) {
		exe.seek(curPos);
		return "";
	}

	uint8 length = exe.readByte();

	String string;
	for (uint16 i = 0; i < length; i++)
		string += (char)exe.readByte();

	exe.seek(curPos);
	return string;
}
Example #5
bool StreamTokenizer::isChunkEnd(SeekableReadStream &stream) {
	if (stream.eos())
		return true;

	// Peek at the next byte to see whether it is a chunk end character
	bool chunkEnd = isIn(stream.readByte(), _chunkEnds);

	// Seek back, so the stream position is left unchanged
	stream.seek(-1, SEEK_CUR);

	return chunkEnd;
}
Example #6
void StreamTokenizer::skipChunk(SeekableReadStream &stream) {
	assert(!_chunkEnds.empty());

	while (!stream.eos() && !stream.err()) {
		if (isIn(stream.readByte(), _chunkEnds)) {
			stream.seek(-1, SEEK_CUR);
			break;
		}
	}

	if (stream.err())
		throw Exception(kReadError);
}
Example #7
void FoxPro::loadFields(SeekableReadStream &dbf, uint32 recordSize) {
	// Read all field descriptions, 0x0D is the end marker
	uint32 fieldsLength = 0;
	while (!dbf.eos() && (dbf.readByte() != 0x0D)) {
		Field field;

		dbf.seek(-1, SeekableReadStream::kOriginCurrent);

		field.name = readStringFixed(dbf, kEncodingASCII, 11);

		field.type     = (Type) dbf.readByte();
		field.offset   = dbf.readUint32LE();
		field.size     = dbf.readByte();
		field.decimals = dbf.readByte();

		field.flags = dbf.readByte();

		field.autoIncNext = dbf.readUint32LE();
		field.autoIncStep = dbf.readByte();

		dbf.skip(8); // Reserved

		if (field.offset != (fieldsLength + 1))
			throw Exception("Field offset makes no sense (%d != %d)",
					field.offset, fieldsLength + 1);

		if (field.type == kTypeMemo)
			_hasMemo = true;

		fieldsLength += field.size;

		_fields.push_back(field);
	}

	if (recordSize != (fieldsLength + 1))
		throw Exception("Length of all fields does not equal the record size");
}
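A minimal sketch of how the three FoxPro loaders above (Examples #1, #3 and #7) might be chained by a top-level loader. The load() wrapper and the seek to firstRecordPos are assumptions based on the values loadHeader() produces, not code taken from the examples; memo handling (signalled by _hasMemo) is left out entirely.

// Hypothetical wrapper, chaining the loaders in header -> fields -> records order
void FoxPro::load(SeekableReadStream &dbf) {
	uint32 recordSize, recordCount, firstRecordPos;

	// Fixed 32-byte header: version, date, counts, flags
	loadHeader(dbf, recordSize, recordCount, firstRecordPos);

	// Field descriptions follow the header, terminated by 0x0D
	loadFields(dbf, recordSize);

	// Records start at the position announced in the header
	dbf.seek(firstRecordPos);
	loadRecords(dbf, recordSize, recordCount);
}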
Example #8
UString StreamTokenizer::getToken(SeekableReadStream &stream) {
	// Init
	bool   chunkEnd     = false;
	bool   inQuote      = false;
	bool   hasSeparator = false;
	uint32 separator    = 0;

	UString token;

	// Run through the stream, character by character
	while (!stream.eos()) {
		char c = (char) stream.readByte();

		if (isIn(c, _chunkEnds)) {
			// This is an end character; seek back and break
			stream.seek(-1, SEEK_CUR);
			chunkEnd = true;
			break;
		}

		if (isIn(c, _quotes)) {
			// This is a quote character, set state
			inQuote = !inQuote;
			continue;
		}

		if (!inQuote && isIn(c, _separators)) {
			// We're not in a quote and this is a separator

			if (!token.empty()) {
				// We have a token

				hasSeparator = true;
				separator = (byte) c;
				break;
			}

			// We don't yet have a token, let the consecutive separator rule decide what to do

			if (_conSepRule == kRuleHeed) {
				// We heed every separator

				hasSeparator = true;
				separator = (byte) c;
				break;
			}

			if ((_conSepRule == kRuleIgnoreSame) && hasSeparator && (separator != ((byte) c))) {
				// We only ignore consecutive separators that are the same; this one differs, so heed it
				hasSeparator = true;
				separator = (byte) c;
				break;
			}

			// We ignore all consecutive separators
			hasSeparator = true;
			separator = (byte) c;
			continue;
		}

		if (isIn(c, _ignores))
			// This is a character to be ignored, do so
			continue;

		// A normal character, add it to our token
		token += c;
	}

	// Is the string actually empty?
	if (!token.empty() && (*token.begin() == '\0'))
		token.clear();

	if (!chunkEnd && (_conSepRule != kRuleHeed)) {
		// We have to look for consecutive separators

		while (!stream.eos()) {
			uint32 c = stream.readByte();

			// Use the rule to determine when we should abort skipping consecutive separators
			if (((_conSepRule == kRuleIgnoreSame) && (c != separator)) ||
			    ((_conSepRule == kRuleIgnoreAll ) && !isIn(c, _separators))) {

				stream.seek(-1, SEEK_CUR);
				break;
			}
		}
	}

	// And return the token
	return token;
}
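Putting the StreamTokenizer pieces together (Examples #2, #5, #6 and #8), a hedged usage sketch. The parseStream() helper and the prior configuration of the tokenizer's chunk ends, separators, quotes and ignores are assumptions, not part of the examples above.

// Hypothetical driver: read a stream chunk by chunk, pulling every token out of each chunk
void parseStream(StreamTokenizer &tokenizer, SeekableReadStream &stream) {
	while (!stream.eos()) {
		// Pull tokens until we run into one of the chunk end characters
		while (!tokenizer.isChunkEnd(stream)) {
			UString token = tokenizer.getToken(stream);
			// ... handle the token ...
		}

		// Skip whatever remains of the chunk and move past its end character
		tokenizer.nextChunk(stream);
	}
}

The inner loop always makes progress: whenever isChunkEnd() returns false, the stream is neither at EOS nor at a chunk end character, so getToken() consumes at least one byte.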