// Byte-swap every multi-byte field of a 32-bit ELF header in place,
// converting between file and host byte order. The e_ident bytes are
// raw octets and therefore need no swapping.
static void byteswapHeader(Elf32_Ehdr &ELF_H)
{
    // Half-word fields.
    bswap(ELF_H.e_type);
    bswap(ELF_H.e_machine);
    bswap(ELF_H.e_ehsize);
    bswap(ELF_H.e_phentsize);
    bswap(ELF_H.e_phnum);
    bswap(ELF_H.e_shentsize);
    bswap(ELF_H.e_shnum);
    bswap(ELF_H.e_shstrndx);

    // Word-sized fields (version, entry point, table offsets, flags).
    bswap(ELF_H.e_version);
    bswap(ELF_H.e_entry);
    bswap(ELF_H.e_phoff);
    bswap(ELF_H.e_shoff);
    bswap(ELF_H.e_flags);
}
/*
 * Expand a user-supplied cipher key into the module-level round-key
 * schedule l_key[] and return a pointer to it.
 *
 * in_key  - key material as 32-bit words; 4, 6 or 8 words are consumed
 *           depending on key_len
 * key_len - key length in bits; > 128 selects the 6-word form, > 192
 *           the 8-word form. Shorter keys are padded with the fixed
 *           constants k2_0/k2_1 and k3_0/k3_1.
 * returns   pointer to l_key (shared module state - not reentrant)
 *
 * NOTE(review): local `w` is declared but never used, and the function
 * ends with a stray `;` after the closing brace - both harmless.
 */
u4byte *set_key(const u4byte in_key[], const u4byte key_len)
{
    u4byte lk[8], v[2], lout[8];
    u4byte i, j, k, w;

    /* One-time construction of four byte-shifted copies of s_box so a
       32-bit word can later be substituted with four table lookups. */
    if(!lb_init)
    {
        for(i = 0; i < 256; ++i)
        {
            l_box[0][i] = ((u4byte)(s_box[i]));
            l_box[1][i] = ((u4byte)(s_box[i])) << 8;
            l_box[2][i] = ((u4byte)(s_box[i])) << 16;
            l_box[3][i] = ((u4byte)(s_box[i])) << 24;
        }
        lb_init = 1;
    }

    /* Constants fed to g_fun, stored byte-swapped. */
    v[0] = bswap(v_0);
    v[1] = bswap(v_1);

    /* Load the key words through the I/O byte-order conversion; pad
       missing words of short keys with fixed constants. */
    lk[0] = io_swap(in_key[0]);
    lk[1] = io_swap(in_key[1]);
    lk[2] = io_swap(in_key[2]);
    lk[3] = io_swap(in_key[3]);
    lk[4] = io_swap(key_len > 128 ? in_key[4] : k2_0);
    lk[5] = io_swap(key_len > 128 ? in_key[5] : k2_1);
    lk[6] = io_swap(key_len > 192 ? in_key[6] : k3_0);
    lk[7] = io_swap(key_len > 192 ? in_key[7] : k3_1);

    /* Prime g_fun once, then generate eight batches of output words and
       scatter their bytes into l_key at computed positions. */
    g_fun(lk, lout, v);

    for(i = 0; i < 8; ++i)
    {
        g_fun(lk, lout, v);

        for(j = 0; j < 4; ++j)
        {
            // this is complex because of a byte swap in each 32 bit output word
            k = 2 * (48 - 16 * j + 2 * (i / 2) - i % 2);

            ((u1byte*)l_key)[k + 3]   = ((u1byte*)lout)[j];
            ((u1byte*)l_key)[k + 2]   = ((u1byte*)lout)[j + 16];
            ((u1byte*)l_key)[k + 19]  = ((u1byte*)lout)[j + 8];
            ((u1byte*)l_key)[k + 18]  = ((u1byte*)lout)[j + 24];
            ((u1byte*)l_key)[k + 131] = ((u1byte*)lout)[j + 4];
            ((u1byte*)l_key)[k + 130] = ((u1byte*)lout)[j + 20];
            ((u1byte*)l_key)[k + 147] = ((u1byte*)lout)[j + 12];
            ((u1byte*)l_key)[k + 146] = ((u1byte*)lout)[j + 28];
        }
    }

    /* Force words 52..59 odd and store their multiplicative inverses
       (mod_inv) twelve words further on. */
    for(i = 52; i < 60; ++i)
    {
        l_key[i] |= 1;
        l_key[i + 12] = mod_inv(l_key[i]);
    }

    /* Post-process the first 48 round keys in pairs. */
    for(i = 0; i < 48; i += 4)
    {
        bp2_fun(l_key[i], l_key[i + 1]);
    }

    return (u4byte*)&l_key;
};
// Swap every multi-byte field of a PalmDoc database header in place,
// converting between file and host byte order.
void unretardify_header(palmdoc_db_header &x)
{
    bswap(x.flags);
    bswap(x.version);

    // Timestamps.
    bswap(x.c_time);
    bswap(x.m_time);
    bswap(x.b_time);

    bswap(x.mod_num);
    bswap(x.app_info);
    bswap(x.sort_info);
    bswap(x.u_id_seed);
    bswap(x.next_record_list);
    bswap(x.num_records);
}
//-----------------------------------------------------------------------------
// form_sfaf_sg_list
//
// This method forms the scatter-gather list understood by SEC firmware.
// This is the sequence of steps:
// 1. Lock physical pages corresponding to virtual memory by using
//    get_user_pages Linux Kernel API.
// 2. Alloc memory for a scatter-gather element
// 3. Set physical start address, length and various parameters in the
//    scatter-gather element.
// 4. get_user_pages returns a reference to every page found in
//    vir_buffer - vir+buffer+vir_buffer_size range. Whereas, SEC needs
//    a SG list with elements which are contiguous, irrespective of page
//    boundaries. Therefore, we need additional manipulation for this.
// 5. Loop 2(by expanding memory for SG list) & 3 until all the physical
//    pages are covered.
//
// On success, *sg_list/*sg_count describe the grown list (the caller owns
// and frees it). The VERIFY/VERIFY_QUICK macros set rc and jump to `exit`
// on failure.
//-----------------------------------------------------------------------------
static sec_result_t form_sfaf_sg_list(user_buf_t * user_buffer,
                                      sec_address_t vir_buffer,
                                      unsigned long vir_buffer_size,
                                      int write,
                                      sfaf_mem_ptr_t **sg_list,
                                      unsigned int *sg_count )
{
    sec_result_t rc = SEC_SUCCESS;
    int xfer_size = 0;
    int num_pages_processed = 0; //to keep track of pages returned by get_user_pages API
    bool need_another_sg_entry = true; //to keep track of elements in SG list provided to SEC
    sfaf_mem_ptr_t * new_sg_list = NULL;
    int new_sg_count = 0;
    unsigned long* sg_args = NULL;

    VERIFY( sg_list != NULL, exit, rc, SEC_NULL_POINTER);
    VERIFY( sg_count != NULL, exit, rc, SEC_NULL_POINTER);

    //Step 1 : get_user_pages API call
    rc = sec_kernel_user_buf_lock( user_buffer, (sec_address_t) vir_buffer, vir_buffer_size, write);
    VERIFY_QUICK(rc == SEC_SUCCESS, exit);

    //We traverse the 'user_buffer' list returned by get_user_pages kernel API,
    while (need_another_sg_entry)
    {
        //Step 2: realloc memory for a additional sfaf_const_mem_ptr data structure and
        //update it's variables
        // (manual realloc: allocate one element larger, copy the old
        // entries across, then free the old list)
        new_sg_count = (*sg_count) + 1;
        new_sg_list = (sfaf_mem_ptr_t*) OS_ALLOC(sizeof(sfaf_mem_ptr_t) *
                                                 (new_sg_count));
        VERIFY( new_sg_list != NULL, exit, rc, SEC_OUT_OF_MEMORY);
        if (*sg_list)
        {
            memcpy(new_sg_list, *sg_list, sizeof(sfaf_mem_ptr_t) * (new_sg_count -1));
            OS_FREE(*sg_list);
        }
        //Update the method output pointers
        *sg_count = new_sg_count;
        *sg_list = new_sg_list;

        //Step 3: Set SG elements
        // First chunk is bounded by both the bytes left in this page and
        // the bytes left in the whole buffer.
        xfer_size = PWU_MIN(user_buffer->page_bytes, user_buffer->size);
        new_sg_list[new_sg_count - 1].address = (void*)(user_buffer->page_addr + user_buffer->offset);
        new_sg_list[new_sg_count - 1].length = xfer_size;
        new_sg_list[new_sg_count - 1].external = 1;
        new_sg_list[new_sg_count - 1].swap = 1;
        new_sg_list[new_sg_count - 1].pmr_type = 0;
        new_sg_list[new_sg_count - 1].rsvd = 0;
        //swap the endianness for SEC
        sg_args = (unsigned long *)&(new_sg_list[new_sg_count - 1].external);
        sg_args[0] = bswap(sg_args[0]);

        need_another_sg_entry = false;
        num_pages_processed++;
        if (num_pages_processed == user_buffer->num_pages)
        {
            //we are done processing the buffer...get out now
            goto exit;
        }

        //Step 4: get_user_pages returns a reference to every page found in
        //vir_buffer - vir+buffer+vir_buffer_size range. Whereas, SEC needs
        //a SG list with elements which are contiguous, irrespective of page
        //boundaries. Therefore, we need additional manipulation for this.
        rc = user_buf_advance( user_buffer, xfer_size);
        VERIFY_QUICK(rc == SEC_SUCCESS , exit);
        xfer_size = PWU_MIN(user_buffer->page_bytes, user_buffer->size);

        // Coalesce physically-contiguous pages into the current SG entry.
        while ((num_pages_processed < user_buffer->num_pages) && (!need_another_sg_entry))
        {
            //navigate through for every page in user_buf_t
            // NOTE(review): `.address + .length` is arithmetic on a void*
            // (a GCC extension treating it as char*) - fine in kernel
            // builds, but worth an explicit cast.
            if ((void*)(user_buffer->page_addr + user_buffer->offset) !=
                (new_sg_list[new_sg_count - 1].address + new_sg_list[new_sg_count - 1].length))
            {
                //until you finish the list or the page's start address is not next to previous page's end address
                //generate another sfaf_const_mem_ptr data structure and follow the process
                need_another_sg_entry = true;
            }
            else
            {
                // Contiguous: just extend the current entry.
                new_sg_list[new_sg_count - 1].length += xfer_size;
                num_pages_processed++;
                if (num_pages_processed == user_buffer->num_pages)
                {
                    //we are done processing the buffer...get out now
                    goto exit;
                }
                rc = user_buf_advance( user_buffer, xfer_size);
                VERIFY_QUICK(rc == SEC_SUCCESS , exit);
                xfer_size = PWU_MIN(user_buffer->page_bytes, user_buffer->size);
            }
        }
    }

exit:
    return rc;
}
// Core of the emulated-memory read path: reads a value of type T from
// guest address em_address into _var, checking (in order) EFB/MMIO,
// main-RAM mirrors, EXRAM mirrors, the L1-cache region, fake VMEM, and
// finally MMU-translated reads - including loads that straddle a page
// boundary, which are assembled byte by byte.
// NOTE(review): effective_address is unused in this function.
inline void ReadFromHardware(T &_var, const u32 em_address, const u32 effective_address, Memory::XCheckTLBFlag flag)
{
    // TODO: Figure out the fastest order of tests for both read and write (they are probably different).
    if ((em_address & 0xC8000000) == 0xC8000000)
    {
        // 0xC8xxxxxx region: EFB below 0xcc000000, MMIO at/above it.
        if (em_address < 0xcc000000)
            _var = EFB_Read(em_address);
        else
            _var = mmio_mapping->Read<T>(em_address);
    }
    else if (((em_address & 0xF0000000) == 0x80000000) ||
             ((em_address & 0xF0000000) == 0xC0000000) ||
             ((em_address & 0xF0000000) == 0x00000000))
    {
        // Main RAM mirrors; guest data is big-endian, hence bswap.
        _var = bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
    }
    else if (m_pEXRAM && (((em_address & 0xF0000000) == 0x90000000) ||
                          ((em_address & 0xF0000000) == 0xD0000000) ||
                          ((em_address & 0xF0000000) == 0x10000000)))
    {
        // EXRAM mirrors (only when EXRAM is present).
        _var = bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
    }
    else if ((em_address >= 0xE0000000) && (em_address < (0xE0000000+L1_CACHE_SIZE)))
    {
        // Locked L1-cache region.
        _var = bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
    }
    else if ((bFakeVMEM && ((em_address &0xF0000000) == 0x70000000)) ||
             (bFakeVMEM && ((em_address &0xF0000000) == 0x40000000)))
    {
        // fake VMEM
        _var = bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
    }
    else
    {
        // MMU
        // Handle loads that cross page boundaries (ewwww)
        if (sizeof(T) > 1 && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
        {
            _var = 0;
            // This could be unaligned down to the byte level... hopefully this is rare, so doing it this
            // way isn't too terrible.
            // TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
            // Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
            u32 tlb_addr = TranslateAddress(em_address, flag);
            for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
            {
                // Start of the new page... translate the address again!
                if (!(addr & (HW_PAGE_SIZE-1)))
                    tlb_addr = TranslateAddress(addr, flag);
                // Important: we need to generate the DSI on the first store that caused the fault, NOT
                // the address of the start of the load.
                if (tlb_addr == 0)
                {
                    if (flag == FLAG_READ)
                    {
                        GenerateDSIException(addr, false);
                        break;
                    }
                }
                else
                {
                    // Accumulate big-endian bytes, most significant first.
                    _var <<= 8;
                    _var |= m_pRAM[tlb_addr & RAM_MASK];
                }
            }
        }
        else
        {
            // Single-page load: translate once, fault or read directly.
            u32 tlb_addr = TranslateAddress(em_address, flag);
            if (tlb_addr == 0)
            {
                if (flag == FLAG_READ)
                {
                    GenerateDSIException(em_address, false);
                }
            }
            else
            {
                _var = bswap((*(const T*)&m_pRAM[tlb_addr & RAM_MASK]));
            }
        }
    }
}
// parses the memwatch data format from a string
//
// Grammar handled: hex digit pairs (accumulated nibble by nibble),
// "..." byte strings and '...' 16-bit strings (both with \n \r \t
// escapes), // and /* */ comments, `?` wildcard bytes (mask output
// only), `$` endianness toggle, `#`..`####` decimal integers of 1/2/4/8
// bytes, `%`/`%%` float/double, and <filename> file inclusion.
//
// in    - input text (consumed until NUL)
// vdata - out: malloc'd data buffer (caller frees); set to NULL first
// vmask - out, optional: parallel wildcard mask buffer
// returns the number of bytes written (0 on file-open failure)
//
// The write_byte/write_short/write_blank/expand helpers (defined
// elsewhere) grow *data/*mask and advance `size`.
unsigned long long read_string_data(const char* in, void** vdata, void** vmask)
{
    *vdata = NULL;
    if (vmask)
        *vmask = NULL;
    unsigned char** data = (unsigned char**)vdata;
    unsigned char** mask = (unsigned char**)vmask;
    int read, chr = 0;
    int string = 0, unicode_string = 0, comment = 0, multiline_comment = 0, high = 1;
    int filename = 0, filename_start;
    unsigned long size = 0;
    int endian = 0;
    while (in[0])
    {
        read = 0;
        // if between // and a newline, don't write to output buffer
        if (comment)
        {
            if (in[0] == '\n')
                comment = 0;
            in++;
        // if between /* and */, don't write to output buffer
        }
        else if (multiline_comment)
        {
            if (in[0] == '*' && in[1] == '/')
            {
                multiline_comment = 0;
                in++;
            }
            in++;
        // if between quotes, read bytes to output buffer, unescaping
        }
        else if (string)
        {
            if (in[0] == '\"')
                string = 0;
            else if (in[0] == '\\')
            {
                // unescape char after a backslash
                if (!in[1])
                    return size;
                if (in[1] == 'n') { write_byte('\n'); }
                else if (in[1] == 'r') { write_byte('\r'); }
                else if (in[1] == 't') { write_byte('\t'); }
                else { write_byte(in[1]); }
                in++;
            }
            else
                write_byte(in[0]);
            in++;
        // if between single quotes, word-expand bytes to output buffer, unescaping
        }
        else if (unicode_string)
        {
            if (in[0] == '\'')
                unicode_string = 0;
            else if (in[0] == '\\')
            {
                // unescape char after a backslash
                if (!in[1])
                    return size;
                if (in[1] == 'n') { write_short('\n'); }
                else if (in[1] == 'r') { write_short('\r'); }
                else if (in[1] == 't') { write_short('\t'); }
                else { write_short(in[1]); }
                if (endian)
                    bswap(&(*data)[size - 2], 2);
                in++;
            }
            else
            {
                write_short(in[0]);
                if (endian)
                    bswap(&(*data)[size - 2], 2);
            }
            in++;
        // if between <>, read a file name, then stick that file into the buffer
        }
        else if (filename)
        {
            if (in[0] == '>')
            {
                filename = 0;
                write_byte(0); // null-terminate the filename
                // TODO: support <filename@offset:size> syntax
                // open the file, read it into the buffer, close the file
                // (the filename was accumulated into the data buffer
                // itself, starting at filename_start, and is overwritten
                // by the file contents)
                FILE* f = fopen((char*)(*data + filename_start), "rb");
                if (!f)
                {
                    // NOTE(review): BUG - this frees `data` (the caller's
                    // pointer variable address, never malloc'd) instead of
                    // the buffer `*data`, and leaves *vdata non-NULL.
                    // Should almost certainly be: free(*data); *data = NULL;
                    if (data)
                        free(data);
                    return 0;
                }
                fseek(f, 0, SEEK_END);
                int file_size = ftell(f);
                size = filename_start + file_size;
                // NOTE(review): realloc result is not checked; on failure
                // this NULLs *data and leaks the old buffer.
                *data = realloc(*data, size);
                fseek(f, 0, SEEK_SET);
                fread((*data + filename_start), 1, file_size, f);
                fclose(f);
            }
            else
                write_byte(in[0]);
            in++;
        // ? is an unknown byte, but only if the caller wants a mask
        }
        else if (in[0] == '?' && vmask)
        {
            write_blank();
            in++;
        // $ changes the endian-ness
        }
        else if (in[0] == '$')
        {
            endian = !endian;
            in++;
        // # signifies a decimal number
        }
        else if (in[0] == '#')
        {
            // 8-bit
            unsigned long long value;
            in++;
            if (in[0] == '#')
            {
                // 16-bit
                in++;
                if (in[0] == '#')
                {
                    // 32-bit
                    in++;
                    if (in[0] == '#')
                    {
                        // 64-bit
                        in++;
                        expand(8);
                        parse_ull(in, (unsigned long long*)(&((*data)[size - 8])), 0);
                        if (endian)
                            bswap(&((*data)[size - 8]), 8);
                        if (mask)
                            *(unsigned long long*)(&((*mask)[size - 8])) = 0xFFFFFFFFFFFFFFFF;
                    }
                    else
                    {
                        expand(4);
                        parse_ull(in, &value, 0);
                        if (endian)
                            bswap(&value, 4);
                        *(int32_t*)(&((*data)[size - 4])) = value;
                        if (mask)
                            *(uint32_t*)(&((*mask)[size - 4])) = 0xFFFFFFFF;
                    }
                }
                else
                {
                    expand(2);
                    parse_ull(in, &value, 0);
                    if (endian)
                        bswap(&value, 2);
                    *(int16_t*)(&((*data)[size - 2])) = value;
                    if (mask)
                        *(uint16_t*)(&((*mask)[size - 2])) = 0xFFFF;
                }
            }
            else
            {
                expand(1);
                parse_ull(in, &value, 0);
                *(int8_t*)(&((*data)[size - 1])) = value;
                if (mask)
                    *(uint8_t*)(&((*mask)[size - 1])) = 0xFF;
            }
            // Skip past the decimal digits (and optional leading '-').
            if (in[0] == '-')
                in++;
            while (isdigit(in[0]))
                in++;
        // % is a float, %% is a double
        }
        else if (in[0] == '%')
        {
            in++;
            if (in[0] == '%')
            {
                in++;
                expand(8);
                double* value = (double*)(&((*data)[size - 8]));
                sscanf(in, "%lf", value);
                if (endian)
                    bswap(value, 8);
                if (mask)
                    *(unsigned long long*)(&((*mask)[size - 8])) = 0xFFFFFFFFFFFFFFFF;
            }
            else
            {
                expand(4);
                float* value = (float*)(&((*data)[size - 4]));
                sscanf(in, "%f", value);
                if (endian)
                    bswap(value, 4);
                if (mask)
                    *(uint32_t*)(&((*mask)[size - 4])) = 0xFFFFFFFF;
            }
            // Skip past the number text just consumed by sscanf.
            if (in[0] == '-')
                in++;
            while (isdigit(in[0]) || (in[0] == '.'))
                in++;
        // anything else is a hex digit
        }
        else
        {
            if ((in[0] >= '0') && (in[0] <= '9')) { read = 1; chr |= (in[0] - '0'); }
            if ((in[0] >= 'A') && (in[0] <= 'F')) { read = 1; chr |= (in[0] - 'A' + 0x0A); }
            if ((in[0] >= 'a') && (in[0] <= 'f')) { read = 1; chr |= (in[0] - 'a' + 0x0A); }
            if (in[0] == '\"') string = 1;
            if (in[0] == '\'') unicode_string = 1;
            if (in[0] == '/' && in[1] == '/') comment = 1;
            if (in[0] == '/' && in[1] == '*') multiline_comment = 1;
            if (in[0] == '<') { filename = 1; filename_start = size; }
            in++;
        }
        // Hex nibble accumulator: first nibble shifts high, second
        // nibble completes the byte and flushes it.
        if (read)
        {
            if (high)
                chr = chr << 4;
            else
            {
                write_byte(chr);
                chr = 0;
            }
            high = !high;
        }
    }
    return size;
}
// Emulated-memory read: returns a T loaded from guest address em_address.
// With translation on (MSR.DR set), checks EFB/MMIO, RAM/EXRAM/L1-cache
// mirrors and fake VMEM before falling through to page-table translation;
// with translation off, uses the physical-address variants. Page-crossing
// loads are assembled byte by byte from both translated pages.
__forceinline static T ReadFromHardware(const u32 em_address)
{
    int segment = em_address >> 28;
    bool performTranslation = UReg_MSR(MSR).DR;

    // Quick check for an address that can't meet any of the following conditions,
    // to speed up the MMU path.
    if (!BitSet32(0xCFC)[segment] && performTranslation)
    {
        // TODO: Figure out the fastest order of tests for both read and write (they are probably different).
        if (flag == FLAG_READ && (em_address & 0xF8000000) == 0xC8000000)
        {
            // EFB below 0xcc000000, MMIO at/above it.
            if (em_address < 0xcc000000)
                return EFB_Read(em_address);
            else
                return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address & 0x0FFFFFFF);
        }
        if (segment == 0x0 || segment == 0x8 || segment == 0xC)
        {
            // Handle RAM; the masking intentionally discards bits (essentially creating
            // mirrors of memory).
            // TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
            return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
        }
        if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
        {
            // Handle EXRAM.
            // TODO: Is this supposed to be mirrored like main RAM?
            return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
        }
        if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
        {
            // Locked L1-cache region.
            return bswap((*(const T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF]));
        }
    }

    if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
    {
        // fake VMEM
        return bswap((*(const T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK]));
    }

    if (!performTranslation)
    {
        // Translation off: same device checks against physical addresses.
        if (flag == FLAG_READ && (em_address & 0xF8000000) == 0x08000000)
        {
            if (em_address < 0x0c000000)
                return EFB_Read(em_address);
            else
                return (T)Memory::mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
        }
        if (segment == 0x0)
        {
            // Handle RAM; the masking intentionally discards bits (essentially creating
            // mirrors of memory).
            // TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
            return bswap((*(const T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK]));
        }
        if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
        {
            return bswap((*(const T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF]));
        }
        PanicAlert("Unable to resolve read address %x PC %x", em_address, PC);
        return 0;
    }

    // MMU: Do page table translation
    u32 tlb_addr = TranslateAddress<flag>(em_address);
    if (tlb_addr == 0)
    {
        if (flag == FLAG_READ)
            GenerateDSIException(em_address, false);
        return 0;
    }

    // Handle loads that cross page boundaries (ewwww)
    // The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
    // (1 instruction on x86) bailout.
    if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
    {
        // This could be unaligned down to the byte level... hopefully this is rare, so doing it this
        // way isn't too terrible.
        // TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
        // Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
        u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
        u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
        if (tlb_addr == 0 || tlb_addr_next_page == 0)
        {
            if (flag == FLAG_READ)
                GenerateDSIException(em_address_next_page, false);
            return 0;
        }
        // Assemble the value big-endian byte by byte, switching to the
        // second page's translation at the boundary.
        T var = 0;
        for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
        {
            if (addr == em_address_next_page)
                tlb_addr = tlb_addr_next_page;
            var = (var << 8) | Memory::physical_base[tlb_addr];
        }
        return var;
    }

    // The easy case!
    return bswap(*(const T*)&Memory::physical_base[tlb_addr]);
}
// Convert a signed 64-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
SInt64 EndianS64_BtoL(SInt64 value)
{
    const SInt64 swapped = bswap(value);
    return swapped;
}
// Convert a signed 64-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
SInt64 EndianS64_LtoB(SInt64 value)
{
    const SInt64 swapped = bswap(value);
    return swapped;
}
// Convert a signed 32-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
SInt32 EndianS32_BtoL(SInt32 value)
{
    const SInt32 swapped = bswap(value);
    return swapped;
}
// Convert a signed 32-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
SInt32 EndianS32_LtoB(SInt32 value)
{
    const SInt32 swapped = bswap(value);
    return swapped;
}
// Convert a signed 16-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
SInt16 EndianS16_LtoB(SInt16 value)
{
    const SInt16 swapped = bswap(value);
    return swapped;
}
// Convert a signed 16-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
SInt16 EndianS16_BtoL(SInt16 value)
{
    const SInt16 swapped = bswap(value);
    return swapped;
}
// Unconditionally reverse the byte order of an unsigned 64-bit value.
UInt64 Endian64_Swap(UInt64 value)
{
    const UInt64 swapped = bswap(value);
    return swapped;
}
// Unconditionally reverse the byte order of an unsigned 32-bit value.
UInt32 Endian32_Swap(UInt32 value)
{
    const UInt32 swapped = bswap(value);
    return swapped;
}
// Convert an unsigned 16-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
UInt16 EndianU16_BtoL(UInt16 value)
{
    const UInt16 swapped = bswap(value);
    return swapped;
}
/*
 * Read `len` 16-bit words from the memory-mapped I/O location `addr`
 * into `buf`, byte-swapping each word as it is stored. The source
 * address is re-read (volatile) for every word, port-input style.
 */
static void mm_insw(unsigned long addr, void *buf, u32 len)
{
	unsigned short *dst = (unsigned short *)buf;

	while (len > 0) {
		*dst++ = bswap(*(volatile u16 *)addr);
		len--;
	}
}
// Convert an unsigned 16-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
UInt16 EndianU16_LtoB(UInt16 value)
{
    const UInt16 swapped = bswap(value);
    return swapped;
}
{ if (flag == FLAG_WRITE) GenerateDSIException(em_address_next_page, true); return; } for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++, val >>= 8) { if (addr == em_address_next_page) tlb_addr = tlb_addr_next_page; Memory::physical_base[tlb_addr] = (u8)val; } return; } // The easy case! *(T*)&Memory::physical_base[tlb_addr] = bswap(data); } // ===================== // ================================= /* These functions are primarily called by the Interpreter functions and are routed to the correct location through ReadFromHardware and WriteToHardware */ // ---------------- static void GenerateISIException(u32 effective_address); u32 Read_Opcode(u32 address) { TryReadInstResult result = TryReadInstruction(address); if (!result.valid)
// Convert an unsigned 32-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
UInt32 EndianU32_BtoL(UInt32 value)
{
    const UInt32 swapped = bswap(value);
    return swapped;
}
// Emulated-memory write: stores `data` (type T) to guest address
// em_address. With translation on, checks GPFifo writes first (the most
// common case), then EFB/MMIO, RAM/EXRAM/L1-cache mirrors and fake VMEM;
// with translation off, the physical-address variants. Falls through to
// page-table translation, handling stores that cross a page boundary
// byte by byte. (The aligned "easy case" store follows this span.)
__forceinline static void WriteToHardware(u32 em_address, const T data)
{
    int segment = em_address >> 28;
    // Quick check for an address that can't meet any of the following conditions,
    // to speed up the MMU path.
    bool performTranslation = UReg_MSR(MSR).DR;

    if (!BitSet32(0xCFC)[segment] && performTranslation)
    {
        // First, let's check for FIFO writes, since they are probably the most common
        // reason we end up in this function.
        // Note that we must mask the address to correctly emulate certain games;
        // Pac-Man World 3 in particular is affected by this.
        if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0xCC008000)
        {
            switch (sizeof(T))
            {
            case 1: GPFifo::Write8((u8)data); return;
            case 2: GPFifo::Write16((u16)data); return;
            case 4: GPFifo::Write32((u32)data); return;
            case 8: GPFifo::Write64((u64)data); return;
            }
        }
        if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0xC8000000)
        {
            // EFB below 0xcc000000, MMIO at/above it.
            if (em_address < 0xcc000000)
            {
                // TODO: This only works correctly for 32-bit writes.
                EFB_Write((u32)data, em_address);
                return;
            }
            else
            {
                Memory::mmio_mapping->Write(em_address & 0x0FFFFFFF, data);
                return;
            }
        }
        if (segment == 0x0 || segment == 0x8 || segment == 0xC)
        {
            // Handle RAM; the masking intentionally discards bits (essentially creating
            // mirrors of memory).
            // TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
            *(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
            return;
        }
        if (Memory::m_pEXRAM && (segment == 0x9 || segment == 0xD) && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
        {
            // Handle EXRAM.
            // TODO: Is this supposed to be mirrored like main RAM?
            *(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
            return;
        }
        if (segment == 0xE && (em_address < (0xE0000000 + Memory::L1_CACHE_SIZE)))
        {
            // Locked L1-cache region.
            *(T*)&Memory::m_pL1Cache[em_address & 0x0FFFFFFF] = bswap(data);
            return;
        }
    }

    if (Memory::bFakeVMEM && performTranslation && (segment == 0x7 || segment == 0x4))
    {
        // fake VMEM
        *(T*)&Memory::m_pFakeVMEM[em_address & Memory::FAKEVMEM_MASK] = bswap(data);
        return;
    }

    if (!performTranslation)
    {
        // Translation off: same device checks against physical addresses.
        if (flag == FLAG_WRITE && (em_address & 0xFFFFF000) == 0x0C008000)
        {
            switch (sizeof(T))
            {
            case 1: GPFifo::Write8((u8)data); return;
            case 2: GPFifo::Write16((u16)data); return;
            case 4: GPFifo::Write32((u32)data); return;
            case 8: GPFifo::Write64((u64)data); return;
            }
        }
        if (flag == FLAG_WRITE && (em_address & 0xF8000000) == 0x08000000)
        {
            if (em_address < 0x0c000000)
            {
                // TODO: This only works correctly for 32-bit writes.
                EFB_Write((u32)data, em_address);
                return;
            }
            else
            {
                Memory::mmio_mapping->Write(em_address, data);
                return;
            }
        }
        if (segment == 0x0)
        {
            // Handle RAM; the masking intentionally discards bits (essentially creating
            // mirrors of memory).
            // TODO: Only the first REALRAM_SIZE is supposed to be backed by actual memory.
            *(T*)&Memory::m_pRAM[em_address & Memory::RAM_MASK] = bswap(data);
            return;
        }
        if (Memory::m_pEXRAM && segment == 0x1 && (em_address & 0x0FFFFFFF) < Memory::EXRAM_SIZE)
        {
            *(T*)&Memory::m_pEXRAM[em_address & 0x0FFFFFFF] = bswap(data);
            return;
        }
        PanicAlert("Unable to resolve write address %x PC %x", em_address, PC);
        return;
    }

    // MMU: Do page table translation
    u32 tlb_addr = TranslateAddress<flag>(em_address);
    if (tlb_addr == 0)
    {
        if (flag == FLAG_WRITE)
            GenerateDSIException(em_address, true);
        return;
    }

    // Handle stores that cross page boundaries (ewwww)
    if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
    {
        T val = bswap(data);

        // We need to check both addresses before writing in case there's a DSI.
        u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
        u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
        if (tlb_addr_next_page == 0)
        {
            if (flag == FLAG_WRITE)
                GenerateDSIException(em_address_next_page, true);
            return;
        }
        // Write the (already swapped) value out a byte at a time, switching
        // to the second page's translation at the boundary.
        for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++, val >>= 8)
        {
            if (addr == em_address_next_page)
                tlb_addr = tlb_addr_next_page;
            Memory::physical_base[tlb_addr] = (u8)val;
        }
        return;
    }
// Convert an unsigned 32-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
UInt32 EndianU32_LtoB(UInt32 value)
{
    const UInt32 swapped = bswap(value);
    return swapped;
}
// address: 0804837a.0 // full-signature: func(main, return=[<int(signed, 4),null,unknown>], parameter=[<int(signed, 4),argc,unknown>, <ptr(ptr(int(undef, 1))),argv,unknown>], varargs=false) s4 main(s4 argc, d1** argv) { reg_aa = bswap(305419896); (void) printf("Output is %x\n", reg_aa); return 0; }
// Convert an unsigned 64-bit value from big-endian to little-endian
// representation via an unconditional byte swap.
UInt64 EndianU64_BtoL(UInt64 value)
{
    const UInt64 swapped = bswap(value);
    return swapped;
}
/*
 * Issue an (ATA or ATAPI) IDENTIFY command to a device and post-process
 * the returned parameter page: convert the 16-bit words from
 * little-endian, un-swap the byte-swapped ASCII id strings (except for
 * vendors known to store them correctly), trim/pack them, and - when
 * `init` is set - derive the device description and initial DMA mode.
 * Returns 0 on success or an errno-style code (ENXIO, ENOMEM, or the
 * request's result).
 */
static int ata_getparam(struct ata_device *atadev, int init)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    struct ata_request *request;
    u_int8_t command = 0;
    int error = ENOMEM, retries = 2;

    /* Pick IDENTIFY flavor based on what the channel probe found for
       this unit; no device -> ENXIO. */
    if (ch->devices & (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE))
        command = ATA_ATA_IDENTIFY;
    if (ch->devices & (atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE))
        command = ATA_ATAPI_IDENTIFY;
    if (!command)
        return ENXIO;

    /* Try the IDENTIFY up to two times; data lands in atadev->param. */
    while (retries-- > 0 && error) {
        if (!(request = ata_alloc_request()))
            break;
        request->dev = atadev->dev;
        request->timeout = 1;
        request->retries = 0;
        request->u.ata.command = command;
        request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET);
        request->data = (void *)&atadev->param;
        request->bytecount = sizeof(struct ata_params);
        request->donecount = 0;
        request->transfersize = DEV_BSIZE;
        ata_queue_request(request);
        error = request->result;
        ata_free_request(request);
    }

    /* Sanity check: a real parameter page starts with printable model
       bytes. */
    if (!error && (isprint(atadev->param.model[0]) ||
                   isprint(atadev->param.model[1]))) {
        struct ata_params *atacap = &atadev->param;
        char buffer[64];
        int16_t *ptr;

        /* The parameter page is an array of little-endian 16-bit words;
           convert them all to host order first. */
        for (ptr = (int16_t *)atacap;
             ptr < (int16_t *)atacap + sizeof(struct ata_params)/2;
             ptr++) {
            *ptr = le16toh(*ptr);
        }
        /* The id strings are byte-swapped within each word, except on a
           few vendors' drives that already store them correctly. */
        if (!(!strncmp(atacap->model, "FX", 2) ||
              !strncmp(atacap->model, "NEC", 3) ||
              !strncmp(atacap->model, "Pioneer", 7) ||
              !strncmp(atacap->model, "SHARP", 5))) {
            bswap(atacap->model, sizeof(atacap->model));
            bswap(atacap->revision, sizeof(atacap->revision));
            bswap(atacap->serial, sizeof(atacap->serial));
        }
        btrim(atacap->model, sizeof(atacap->model));
        bpack(atacap->model, atacap->model, sizeof(atacap->model));
        btrim(atacap->revision, sizeof(atacap->revision));
        bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
        btrim(atacap->serial, sizeof(atacap->serial));
        bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));

        if (bootverbose)
            kprintf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
                    device_get_unit(ch->dev),
                    atadev->unit == ATA_MASTER ? "master" : "slave",
                    ata_mode2str(ata_pmode(atacap)),
                    ata_mode2str(ata_wmode(atacap)),
                    ata_mode2str(ata_umode(atacap)),
                    (atacap->hwres & ATA_CABLE_ID) ? "80":"40");

        if (init) {
            /* "%.40s/%.8s" bounds the copy well within buffer[64]. */
            ksprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
            device_set_desc_copy(atadev->dev, buffer);
            /* ATAPI devices (excluding CFA cards, which reuse the
               magic config values) get DMA only for interrupt-DRQ-free
               UDMA2+ parts; plain ATA gets DMA whenever any DMA mode is
               advertised. */
            if ((atadev->param.config & ATA_PROTO_ATAPI) &&
                (atadev->param.config != ATA_CFA_MAGIC1) &&
                (atadev->param.config != ATA_CFA_MAGIC2)) {
                if (atapi_dma && ch->dma &&
                    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
                    ata_umode(&atadev->param) >= ATA_UDMA2)
                    atadev->mode = ATA_DMA_MAX;
            }
            else {
                if (ata_dma && ch->dma &&
                    (ata_umode(&atadev->param) > 0 ||
                     ata_wmode(&atadev->param) > 0))
                    atadev->mode = ATA_DMA_MAX;
            }
        }
    }
    else {
        /* IDENTIFY "succeeded" but returned garbage. */
        if (!error)
            error = ENXIO;
    }
    return error;
}
// Convert an unsigned 64-bit value from little-endian to big-endian
// representation via an unconditional byte swap.
UInt64 EndianU64_LtoB(UInt64 value)
{
    const UInt64 swapped = bswap(value);
    return swapped;
}
// Emulated-memory read (third variant, Memory::base backing): returns a T
// loaded from guest address em_address after checking EFB/MMIO, RAM,
// EXRAM, L1-cache and fake-VMEM regions, falling back to page-table
// translation with byte-by-byte assembly for page-crossing loads.
__forceinline T ReadFromHardware(const u32 em_address)
{
    int segment = em_address >> 28;
    // Quick check for an address that can't meet any of the following conditions,
    // to speed up the MMU path.
    if (!BitSet32(0xCFC)[segment])
    {
        // TODO: Figure out the fastest order of tests for both read and write (they are probably different).
        if ((em_address & 0xC8000000) == 0xC8000000)
        {
            // EFB below 0xcc000000, MMIO at/above it.
            if (em_address < 0xcc000000)
                return EFB_Read(em_address);
            else
                return (T)mmio_mapping->Read<typename std::make_unsigned<T>::type>(em_address);
        }
        else if (segment == 0x8 || segment == 0xC || segment == 0x0)
        {
            // Main RAM mirrors; guest data is big-endian, hence bswap.
            return bswap((*(const T*)&m_pRAM[em_address & RAM_MASK]));
        }
        else if (m_pEXRAM && (segment == 0x9 || segment == 0xD || segment == 0x1))
        {
            return bswap((*(const T*)&m_pEXRAM[em_address & EXRAM_MASK]));
        }
        else if (segment == 0xE && (em_address < (0xE0000000 + L1_CACHE_SIZE)))
        {
            return bswap((*(const T*)&m_pL1Cache[em_address & L1_CACHE_MASK]));
        }
    }

    if (bFakeVMEM && (segment == 0x7 || segment == 0x4))
    {
        // fake VMEM
        return bswap((*(const T*)&m_pFakeVMEM[em_address & FAKEVMEM_MASK]));
    }

    // MMU: Do page table translation
    u32 tlb_addr = TranslateAddress<flag>(em_address);
    if (tlb_addr == 0)
    {
        if (flag == FLAG_READ)
            GenerateDSIException(em_address, false);
        return 0;
    }

    // Handle loads that cross page boundaries (ewwww)
    // The alignment check isn't strictly necessary, but since this is a rare slow path, it provides a faster
    // (1 instruction on x86) bailout.
    if (sizeof(T) > 1 && (em_address & (sizeof(T) - 1)) && (em_address & (HW_PAGE_SIZE - 1)) > HW_PAGE_SIZE - sizeof(T))
    {
        // This could be unaligned down to the byte level... hopefully this is rare, so doing it this
        // way isn't too terrible.
        // TODO: floats on non-word-aligned boundaries should technically cause alignment exceptions.
        // Note that "word" means 32-bit, so paired singles or doubles might still be 32-bit aligned!
        u32 em_address_next_page = (em_address + sizeof(T) - 1) & ~(HW_PAGE_SIZE - 1);
        u32 tlb_addr_next_page = TranslateAddress<flag>(em_address_next_page);
        if (tlb_addr == 0 || tlb_addr_next_page == 0)
        {
            if (flag == FLAG_READ)
                GenerateDSIException(em_address_next_page, false);
            return 0;
        }
        // Assemble the value big-endian byte by byte, switching to the
        // second page's translation at the boundary.
        T var = 0;
        for (u32 addr = em_address; addr < em_address + sizeof(T); addr++, tlb_addr++)
        {
            if (addr == em_address_next_page)
                tlb_addr = tlb_addr_next_page;
            var = (var << 8) | Memory::base[tlb_addr];
        }
        return var;
    }

    // The easy case!
    return bswap(*(const T*)&Memory::base[tlb_addr]);
}
// Unconditionally reverse the byte order of an unsigned 16-bit value.
UInt16 Endian16_Swap(UInt16 value)
{
    const UInt16 swapped = bswap(value);
    return swapped;
}
// Swap every multi-byte field of a MOBI header in place, converting
// between file and host byte order.
void unretardify_header(mobi_header &x)
{
    bswap(x.header_len);
    bswap(x.mobi_type);
    bswap(x.text_encoding);
    bswap(x.u_id);
    bswap(x.file_version);

    // Index records.
    bswap(x.ortographic_index);
    bswap(x.inflection_index);
    bswap(x.index_names);
    bswap(x.index_keys);
    bswap(x.extra_index0);
    bswap(x.extra_index1);
    bswap(x.extra_index2);
    bswap(x.extra_index3);
    bswap(x.extra_index4);
    bswap(x.extra_index5);
    bswap(x.first_nonbook_index);

    bswap(x.full_name_offset);
    bswap(x.full_name_length);
    bswap(x.locale);
    bswap(x.input_language);
    bswap(x.output_language);
    bswap(x.min_version);
    bswap(x.first_image_index);

    // Huffman compression tables.
    bswap(x.huffman_record_offset);
    bswap(x.huffman_record_count);
    bswap(x.huffman_table_offset);
    bswap(x.huffman_table_length);

    bswap(x.exth_flags);

    // DRM description.
    bswap(x.drm_offset);
    bswap(x.drm_count);
    bswap(x.drm_size);
    bswap(x.drm_flags);

    bswap(x.first_content_record_number);
    bswap(x.last_content_record_number);
    bswap(x.unknown);

    // FCIS/FLIS records.
    bswap(x.fcis_record_number);
    bswap(x.fcis_record_count);
    bswap(x.flis_record_number);
    bswap(x.flis_record_count);

    bswap(x.unk0);
    bswap(x.unk1);
    bswap(x.unk2);
    bswap(x.unk3);
    bswap(x.unk4);

    bswap(x.extra_record_data_flags);
    bswap(x.indx_record_offset);
}
char* Tstream::ugets(char *buf0, int len) const { char *buf = buf0; bool eof = true, wasr = false; switch (encoding) { case ENC_UTF8: { wchar_t unicodeBuf[MAX_SUBTITLE_LENGTH + 1]; char srcBuf[MAX_SUBTITLE_LENGTH * 2 + 1]; unicodeBuf[MAX_SUBTITLE_LENGTH] = 0; srcBuf[MAX_SUBTITLE_LENGTH] = 0; // read a line int count = 0; char u1, u2, u3, u4; while (count <= MAX_SUBTITLE_LENGTH - 3 && read(&u1, 1, sizeof(u1)) == sizeof(u1)) { eof = false; if (!(u1 & 0x80)) { // 0xxxxxxx srcBuf[count++] = u1; } else if ((u1 & 0xe0) == 0xc0) { // 110xxxxx 10xxxxxx srcBuf[count++] = u1; if (read(&u2, 1, sizeof(u2)) == sizeof(u2)) { srcBuf[count++] = u2; } else { break; } } else if ((u1 & 0xf0) == 0xe0) { // 1110xxxx 10xxxxxx 10xxxxxx srcBuf[count++] = u1; if (read(&u2, 1, sizeof(u2)) == sizeof(u2)) { srcBuf[count++] = u2; } else { break; } if (read(&u3, 1, sizeof(u3)) == sizeof(u3)) { srcBuf[count++] = u3; } else { break; } } else if ((u1 & 0xf8) == 0xf0) { // 1111xxxx 10xxxxxx 10xxxxxx 10xxxxxx srcBuf[count++] = u1; if (read(&u2, 1, sizeof(u2)) == sizeof(u2)) { srcBuf[count++] = u2; } else { break; } if (read(&u3, 1, sizeof(u3)) == sizeof(u3)) { srcBuf[count++] = u3; } else { break; } if (read(&u4, 1, sizeof(u4)) == sizeof(u4)) { srcBuf[count++] = u4; } else { break; } } if (u1 == '\r') { if (!crln) { count--; } wasr = true; continue; } if (u1 == '\n') { if (utod && !wasr) { srcBuf[--count] = '\r'; } break; } } if (eof) { break; } srcBuf[count] = 0; int unicodeLen = MultiByteToWideChar(CP_UTF8, 0, srcBuf, -1, unicodeBuf, MAX_SUBTITLE_LENGTH * 2); int ansiLen = WideCharToMultiByte(CP_ACP, 0, unicodeBuf, unicodeLen, srcBuf, MAX_SUBTITLE_LENGTH * 2, NULL, NULL); memcpy(buf0, srcBuf, ansiLen < len ? 
ansiLen : len); break; } case ENC_LE16: { WCHAR w; while (read(&w, 1, sizeof(w)) == sizeof(w)) { eof = false; if (buf - buf0 > len) { break; } char c = '?'; if (!(w & 0xff00)) { c = char(w & 0xff); } else { char cA[10]; c = *unicode16toAnsi(&w, 1, cA); } if (c == '\r') { if (crln) { *buf++ = c; } wasr = true; continue; } if (c == '\n') { if (utod && !wasr) { *buf++ = '\r'; } if (crln) { *buf++ = c; } break; } *buf++ = c; } *buf = '\0'; break; } case ENC_BE16: { WCHAR w; while (read(&w, 1, sizeof(w)) == sizeof(w)) { eof = false; char c = '?'; if (!(w & 0xff)) { c = char(w >> 8); } else { bswap(w); char cA[10]; c = *unicode16toAnsi(&w, 1, cA); } if (c == '\r') { if (crln) { *buf++ = c; } wasr = true; continue; } if (c == '\n') { if (utod && !wasr) { *buf++ = '\r'; } if (crln) { *buf++ = c; } break; } *buf++ = c; } *buf = '\0'; break; }