static int initialize_gpt(struct drive *drive, const char *guid) {
  GptHeader *h = (GptHeader *)drive->gpt.primary_header;
  memcpy(h->signature, GPT_HEADER_SIGNATURE, GPT_HEADER_SIGNATURE_SIZE);
  h->revision = GPT_HEADER_REVISION;
  h->size = sizeof(GptHeader);
  h->my_lba = 1;                                    // primary header at LBA 1
  h->alternate_lba = drive->gpt.drive_sectors - 1;  // backup header at last LBA
  // Usable space starts after the PMBR, this header, and the entry array,
  // and ends before the mirrored structures at the end of the drive.
  h->first_usable_lba = 1 + 1 + GPT_ENTRIES_SECTORS;
  h->last_usable_lba = drive->gpt.drive_sectors - 1 - GPT_ENTRIES_SECTORS - 1;
  if (guid) {
    if (StrToGuid(guid, &h->disk_uuid) != CGPT_OK) {
      Error("Provided GUID is invalid: \"%s\"\n", guid);
      return CGPT_FAILED;
    }
  } else {
    if (!uuid_generator) {
      Error("Unable to generate new GUID. uuid_generator not set.\n");
      return CGPT_FAILED;
    }
    (*uuid_generator)((uint8_t *)&h->disk_uuid);
  }
  h->entries_lba = 2;  // entry array starts right after this header
  h->number_of_entries = 128;
  h->size_of_entry = sizeof(GptEntry);

  // Copy to secondary
  RepairHeader(&drive->gpt, MASK_PRIMARY);

  UpdateCrc(&drive->gpt);

  return CGPT_OK;
}
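The cgpt variants here all finish by calling UpdateCrc(&drive->gpt), which must refresh the GPT checksums (the header CRC and the entry-array CRC, for both copies). A minimal sketch of the primary-side work, assuming a generic Crc32(buf, len) helper and the spec-defined header_crc32/entries_crc32 fields of the UEFI header layout; the real cgpt routine also covers the secondary copy:

/* Sketch only, not cgpt's actual implementation. Crc32() is an
 * assumed helper; the field names follow the UEFI GPT header layout. */
static void update_primary_crcs(GptData *gpt) {
  GptHeader *h = (GptHeader *)gpt->primary_header;

  /* The entries CRC covers the whole entry array. */
  h->entries_crc32 =
      Crc32(gpt->primary_entries, h->number_of_entries * h->size_of_entry);

  /* Per the UEFI spec, the header CRC is computed over h->size bytes
   * with the header_crc32 field itself zeroed first. */
  h->header_crc32 = 0;
  h->header_crc32 = Crc32((const uint8_t *)h, h->size);
}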
TInt YModem::ReadPacket(TDes8& aDest)
{
	TUint8* pD = (TUint8*)aDest.Ptr();
	TInt r;
	TPtr8 d(pD, 0, 1);

	// Read the header byte, which selects the packet type.
	r = ReadBlock(d);
	if (r != KErrNone)
		return r;
	if (d.Length() == 0)
		return KErrZeroLengthPacket;
	TUint8 b0 = *pD;
	if (b0 == CAN)          // sender cancelled the transfer
		return KErrAbort;
	if (b0 == EOT)          // end of file
		return KErrEof;
	if (b0 == SOH)          // 128-byte data block
		iBlockSize = 128;
	else if (b0 == STX)     // 1024-byte data block
		iBlockSize = 1024;
	else
		return KErrBadPacketType;

	// Header byte + sequence byte + complemented sequence byte +
	// data block + 2 CRC bytes.
	iTimeout = 5000000;
	iPacketSize = iBlockSize + 5;

	// Read the rest of the packet.
	d.Set(pD + 1, 0, iPacketSize - 1);
	r = ReadBlock(d);
	if (r != KErrNone && r != KErrTimedOut)
		return r;
	if (d.Length() < iPacketSize - 1)
		return KErrPacketTooShort;

	// The sequence number is followed by its ones' complement,
	// so XOR-ing the two bytes must yield 0xff.
	TUint8 seq = pD[1];
	TUint8 seqbar = pD[2];
	seqbar ^= seq;
	if (seqbar != 0xff)
		return KErrCorruptSequenceNumber;
	if (seq == iSeqNum)     // sender resent the previous packet
		return KErrAlreadyExists;
	else
		{
		TUint8 nextseq = (TUint8)(iSeqNum + 1);
		if (seq != nextseq)
			return KErrWrongSequenceNumber;
		}

	// Verify the big-endian CRC-16 trailer over the data block.
	iCrc = 0;
	UpdateCrc(pD + 3, iBlockSize);
	aDest.SetLength(iPacketSize);
	TUint16 rx_crc = (TUint16)((pD[iPacketSize - 2] << 8) | pD[iPacketSize - 1]);
	if (rx_crc != iCrc)
		return KErrBadCrc;
	++iSeqNum;
	return KErrNone;
}
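The packet reader seeds iCrc to zero and folds the data block through UpdateCrc() before comparing against the two big-endian trailer bytes. YMODEM's checksum is CRC-16/XMODEM (polynomial 0x1021, initial value 0, most significant bit first); a bitwise sketch of the update step, presented as an assumed shape for the class's helper:

// Sketch of a CRC-16/XMODEM accumulator; the real member function may
// be table-driven, but it must be equivalent to this bitwise form.
void YModem::UpdateCrc(const TUint8* aPtr, TInt aLength)
{
	while (aLength-- > 0)
		{
		iCrc ^= (TUint16)(*aPtr++ << 8);   // fold the next byte into the top bits
		for (TInt i = 0; i < 8; ++i)
			{
			if (iCrc & 0x8000)
				iCrc = (TUint16)((iCrc << 1) ^ 0x1021);  // XMODEM polynomial
			else
				iCrc = (TUint16)(iCrc << 1);
			}
		}
}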
int GptCreate(struct drive *drive, CgptCreateParams *params) {
  // Erase the data
  memset(drive->gpt.primary_header, 0,
         drive->gpt.sector_bytes * GPT_HEADER_SECTOR);
  memset(drive->gpt.secondary_header, 0,
         drive->gpt.sector_bytes * GPT_HEADER_SECTOR);
  memset(drive->gpt.primary_entries, 0,
         drive->gpt.sector_bytes * GPT_ENTRIES_SECTORS);
  memset(drive->gpt.secondary_entries, 0,
         drive->gpt.sector_bytes * GPT_ENTRIES_SECTORS);
  drive->gpt.modified |= (GPT_MODIFIED_HEADER1 | GPT_MODIFIED_ENTRIES1 |
                          GPT_MODIFIED_HEADER2 | GPT_MODIFIED_ENTRIES2);

  // Initialize a blank set
  if (!params->zap) {
    GptHeader *h = (GptHeader *)drive->gpt.primary_header;
    memcpy(h->signature, GPT_HEADER_SIGNATURE, GPT_HEADER_SIGNATURE_SIZE);
    h->revision = GPT_HEADER_REVISION;
    h->size = sizeof(GptHeader);
    h->my_lba = 1;
    h->alternate_lba = drive->gpt.drive_sectors - 1;
    h->first_usable_lba = 1 + 1 + GPT_ENTRIES_SECTORS;
    h->last_usable_lba = drive->gpt.drive_sectors - 1 - GPT_ENTRIES_SECTORS - 1;
    if (!uuid_generator) {
      Error("Unable to generate new GUID. uuid_generator not set.\n");
      return -1;
    }
    (*uuid_generator)((uint8_t *)&h->disk_uuid);
    h->entries_lba = 2;
    h->number_of_entries = 128;
    h->size_of_entry = sizeof(GptEntry);

    // Copy to secondary
    RepairHeader(&drive->gpt, MASK_PRIMARY);

    UpdateCrc(&drive->gpt);
  }
  return 0;
}
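In all three GPT snippets, RepairHeader(&drive->gpt, MASK_PRIMARY) is what turns the freshly initialized primary header into a matching secondary. Conceptually (a sketch, not cgpt's exact code) the backup header is a byte copy with the self-referential LBAs swapped and entries_lba retargeted at the backup entry array, which sits just before the backup header at the end of the disk:

/* Sketch of the MASK_PRIMARY direction only; the real RepairHeader()
 * can also rebuild the primary from a valid secondary. */
static void mirror_primary_header(GptData *gpt) {
  GptHeader *primary = (GptHeader *)gpt->primary_header;
  GptHeader *secondary = (GptHeader *)gpt->secondary_header;

  memcpy(secondary, primary, sizeof(GptHeader));
  /* The backup header describes itself, so the two location fields swap. */
  secondary->my_lba = primary->alternate_lba;
  secondary->alternate_lba = primary->my_lba;
  /* Its entry array immediately precedes it on disk. */
  secondary->entries_lba = secondary->my_lba - GPT_ENTRIES_SECTORS;
}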
static int GptCreate(struct drive *drive, CgptCreateParams *params) {
  // Allocate and/or erase the data.
  // We cannot assume the GPT headers or entry arrays have been allocated
  // by GptLoad() because those fields might have failed validation checks.
  AllocAndClear(&drive->gpt.primary_header,
                drive->gpt.sector_bytes * GPT_HEADER_SECTORS);
  AllocAndClear(&drive->gpt.secondary_header,
                drive->gpt.sector_bytes * GPT_HEADER_SECTORS);
  drive->gpt.modified |= (GPT_MODIFIED_HEADER1 | GPT_MODIFIED_ENTRIES1 |
                          GPT_MODIFIED_HEADER2 | GPT_MODIFIED_ENTRIES2);

  // Initialize a blank set
  if (!params->zap) {
    GptHeader *h = (GptHeader *)drive->gpt.primary_header;
    memcpy(h->signature, GPT_HEADER_SIGNATURE, GPT_HEADER_SIGNATURE_SIZE);
    h->revision = GPT_HEADER_REVISION;
    h->size = sizeof(GptHeader);
    h->my_lba = GPT_PMBR_SECTORS;  /* The second sector on drive. */
    h->alternate_lba = drive->gpt.gpt_drive_sectors - GPT_HEADER_SECTORS;
    if (CGPT_OK != GenerateGuid(&h->disk_uuid)) {
      Error("Unable to generate new GUID.\n");
      return -1;
    }

    /* Calculate number of entries */
    h->size_of_entry = sizeof(GptEntry);
    h->number_of_entries = MAX_NUMBER_OF_ENTRIES;
    if (drive->gpt.flags & GPT_FLAG_EXTERNAL) {
      // We might have smaller space for the GPT table. Scale accordingly.
      //
      // +------+------------+---------------+-----+--------------+-----------+
      // | PMBR | Prim. Head | Prim. Entries | ... | Sec. Entries | Sec. Head |
      // +------+------------+---------------+-----+--------------+-----------+
      //
      // Half the size of gpt_drive_sectors must be big enough to hold PMBR +
      // GPT Header + Entries Table, though the secondary structures do not
      // contain PMBR.
      size_t required_headers_size =
          (GPT_PMBR_SECTORS + GPT_HEADER_SECTORS) * drive->gpt.sector_bytes;
      size_t min_entries_size = MIN_NUMBER_OF_ENTRIES * h->size_of_entry;
      size_t required_min_size = required_headers_size + min_entries_size;
      size_t half_size =
          (drive->gpt.gpt_drive_sectors / 2) * drive->gpt.sector_bytes;
      if (half_size < required_min_size) {
        Error("Not enough space to store GPT structures. Required %zu bytes.\n",
              required_min_size * 2);
        return -1;
      }
      size_t max_entries =
          (half_size - required_headers_size) / h->size_of_entry;
      if (h->number_of_entries > max_entries) {
        h->number_of_entries = max_entries;
      }
    }

    /* Then use number of entries to calculate entries_lba. */
    h->entries_lba = h->my_lba + GPT_HEADER_SECTORS;
    if (!(drive->gpt.flags & GPT_FLAG_EXTERNAL)) {
      h->entries_lba += params->padding;
      h->first_usable_lba = h->entries_lba + CalculateEntriesSectors(h);
      h->last_usable_lba = (drive->gpt.streaming_drive_sectors -
                            GPT_HEADER_SECTORS - CalculateEntriesSectors(h) - 1);
    } else {
      h->first_usable_lba = params->padding;
      h->last_usable_lba = (drive->gpt.streaming_drive_sectors - 1);
    }

    size_t entries_size = h->number_of_entries * h->size_of_entry;
    AllocAndClear(&drive->gpt.primary_entries, entries_size);
    AllocAndClear(&drive->gpt.secondary_entries, entries_size);

    // Copy to secondary
    RepairHeader(&drive->gpt, MASK_PRIMARY);

    UpdateCrc(&drive->gpt);
  }
  return 0;
}
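The usable-LBA math and the external-drive scaling above both lean on CalculateEntriesSectors(), which is presumably just the entry array size rounded up to whole sectors, along these lines:

/* Assumed shape of the helper: sectors needed to hold the entry array.
 * The sector size is passed explicitly here for the sake of the sketch. */
static size_t calculate_entries_sectors(const GptHeader *h,
                                        size_t sector_bytes) {
  size_t entries_bytes = (size_t)h->number_of_entries * h->size_of_entry;
  return (entries_bytes + sector_bytes - 1) / sector_bytes;
}

With the classic 128 entries of 128 bytes each, that is 16 KiB, or 32 sectors on a 512-byte-sector drive; adding the protective MBR and the header gives the familiar first usable LBA of 34, which is exactly the 1 + 1 + GPT_ENTRIES_SECTORS expression in the older snippets.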
DWORD CDeflateCompressor::Decompress(void *pBuffer, DWORD uSize)
{
	if (m_bDecompressionDone)
		return 0;

	DWORD uRead = 0;
	if (m_pFile->m_uMethod == methodDeflate)
	{
		m_stream.next_out = (zarch_Bytef*)pBuffer;
		m_stream.avail_out = uSize > m_uUncomprLeft ? (DWORD)m_uUncomprLeft : uSize;

		// may happen when the file is 0 sized
		bool bForce = m_stream.avail_out == 0 && m_uComprLeft > 0;
		while (m_stream.avail_out > 0 || (bForce && m_uComprLeft > 0))
		{
			if (m_stream.avail_in == 0
				/* && m_uComprLeft >= 0*/ // Also when there are zero bytes left. It should always be true.
				)
			{
				DWORD uToRead = FillBuffer();
				m_stream.next_in = (zarch_Bytef*)(char*)m_pBuffer;
				m_stream.avail_in = uToRead;
			}

			ZIP_SIZE_TYPE uTotal = m_stream.total_out;
			zarch_Bytef* pOldBuf = m_stream.next_out;
			int ret = zarch_inflate(&m_stream, Z_SYNC_FLUSH);
			// will not exceed DWORD
			DWORD uToCopy = (DWORD)(m_stream.total_out - uTotal);

			UpdateCrc(pOldBuf, uToCopy);

			m_uUncomprLeft -= uToCopy;
			uRead += uToCopy;

			if (ret == Z_STREAM_END)
			{
				m_bDecompressionDone = true;
				return uRead;
			}
			else
				CheckForError(ret);
		}

		if (!uRead && m_options.m_bCheckLastBlock && uSize != 0)
		{
			if (zarch_inflate(&m_stream, Z_SYNC_FLUSH) != Z_STREAM_END)
				// there were no more bytes to read and there was no ending block,
				// otherwise the method would return earlier
				ThrowError(CZipException::badZipFile);
		}
	}
	else
	{
		// Stored (uncompressed) data: copy straight from the archive.
		if (m_uComprLeft < uSize)
			uRead = (DWORD)m_uComprLeft;
		else
			uRead = uSize;

		if (uRead > 0)
		{
			m_pStorage->Read(pBuffer, uRead, false);
			if (m_pCryptograph)
				m_pCryptograph->Decode((char*)pBuffer, uRead);
			UpdateCrc(pBuffer, uRead);
			m_uComprLeft -= uRead;
			m_uUncomprLeft -= uRead;
			m_stream.total_out += uRead;
		}
	}
	return uRead;
}
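Both the inflate path and the stored path funnel the plaintext through UpdateCrc() so the running CRC-32 can later be checked against the value recorded in the archive. Assuming the class accumulates into a member (m_uCrc32 is a hypothetical name here) and that the zarch_-prefixed zlib build seen above exposes the usual crc32() entry point, the helper is essentially a one-liner:

// Sketch; m_uCrc32 stands in for whatever member the class really uses,
// and zarch_crc32 is assumed to exist in the renamed zlib build.
void CDeflateCompressor::UpdateCrc(const void* pBuffer, DWORD uSize)
{
	// zlib's crc32() chains: feed it the previous value to extend the CRC.
	m_uCrc32 = zarch_crc32(m_uCrc32, (const zarch_Bytef*)pBuffer, uSize);
}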