Example #1
static inline void
buzhash_update (uint32_t *hash, const unsigned char *data, size_t len)
{
  *hash = rol32 (*hash, 1)
        ^ rol32 (buzhash_table[(size_t) (*data)], len)
        ^ buzhash_table[(size_t) (*(data + len))];
}
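
Every example on this page assumes a 32-bit rotate helper supplied by the surrounding project. A minimal, portable sketch of the kernel-style rol32/ror32 pair used throughout (names assumed; the shift counts are masked so the expressions stay well defined even for a rotate by 0):

#include <stdint.h>

/* Rotate x left by r bits. */
static inline uint32_t rol32(uint32_t x, unsigned int r)
{
    return (x << (r & 31)) | (x >> ((32 - r) & 31));
}

/* Rotate x right by r bits. */
static inline uint32_t ror32(uint32_t x, unsigned int r)
{
    return (x >> (r & 31)) | (x << ((32 - r) & 31));
}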
Example #2
// MurmurHash3 (x86_32): https://code.google.com/p/smhasher/ - for non-cryptographic use only
uint32_t BRMurmur3_32(const void *data, size_t len, uint32_t seed)
{
    uint32_t h = seed, k = 0;
    size_t i, count = len/4;
    
    assert(data != NULL || len == 0);
    
    for (i = 0; i < count; i++) {
        k = le32(((const uint32_t *)data)[i])*C1;
        k = rol32(k, 15)*C2;
        h ^= k;
        h = rol32(h, 13)*5 + 0xe6546b64;
    }
    
    k = 0;
    
    switch (len & 3) {
        case 3: k ^= ((const uint8_t *)data)[i*4 + 2] << 16; // fall through
        case 2: k ^= ((const uint8_t *)data)[i*4 + 1] << 8; // fall through
        case 1: k ^= ((const uint8_t *)data)[i*4], k *= C1, h ^= rol32(k, 15)*C2;
    }
    
    h ^= len;
    fmix32(h);
    return h;
}
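
The constants and the final avalanche step are defined elsewhere in the source. A sketch of plausible definitions: C1 and C2 are the standard MurmurHash3 x86_32 constants, le32() is assumed to convert a little-endian word to host order (a no-op on little-endian machines), and fmix32 is assumed to be a statement-like macro, since fmix32(h) above is evidently used for its side effect on h:

#define C1 0xcc9e2d51
#define C2 0x1b873593
/* final avalanche mix, applied to h in place */
#define fmix32(h) ((h) ^= (h) >> 16, (h) *= 0x85ebca6b, (h) ^= (h) >> 13,\
                   (h) *= 0xc2b2ae35, (h) ^= (h) >> 16)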
Example #3
static UINT32
SMS4_compound_substitute(UINT32 x)
{
  // first do substitute
  UINT32 t = SMS4_sbox_sub(x);

  // then a linear transform
  UINT32 c = t^rol32(t,2)^rol32(t,10)^rol32(t,18)^rol32(t,24);
  return c;
}
Example #4
static void michael_block(struct michael_mic_ctx *mctx, u32 val)
{
	mctx->l ^= val;
	mctx->r ^= rol32(mctx->l, 17);
	mctx->l += mctx->r;
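	/* swap the bytes within each 16-bit half of l (Michael's XSWAP step) */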
	mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) |
		   ((mctx->l & 0x00ff00ff) << 8);
	mctx->l += mctx->r;
	mctx->r ^= rol32(mctx->l, 3);
	mctx->l += mctx->r;
	mctx->r ^= ror32(mctx->l, 2);
	mctx->l += mctx->r;
}
Example #5
static void _BRSHA1Compress(uint32_t *r, uint32_t *x)
{
    int i = 0;
    uint32_t a = r[0], b = r[1], c = r[2], d = r[3], e = r[4], t;
    
    for (; i < 16; i++) sha1(f1(b, c, d), 0x5a827999, (x[i] = be32(x[i])));
    for (; i < 20; i++) sha1(f1(b, c, d), 0x5a827999, (x[i] = rol32(x[i - 3] ^ x[i - 8] ^ x[i - 14] ^ x[i - 16], 1)));
    for (; i < 40; i++) sha1(f2(b, c, d), 0x6ed9eba1, (x[i] = rol32(x[i - 3] ^ x[i - 8] ^ x[i - 14] ^ x[i - 16], 1)));
    for (; i < 60; i++) sha1(f3(b, c, d), 0x8f1bbcdc, (x[i] = rol32(x[i - 3] ^ x[i - 8] ^ x[i - 14] ^ x[i - 16], 1)));
    for (; i < 80; i++) sha1(f2(b, c, d), 0xca62c1d6, (x[i] = rol32(x[i - 3] ^ x[i - 8] ^ x[i - 14] ^ x[i - 16], 1)));
    
    r[0] += a, r[1] += b, r[2] += c, r[3] += d, r[4] += e;
    var_clean(&a, &b, &c, &d, &e, &t);
}
Example #6
File: main.c Project: wgc1212/pimd
static void do_randomize(void)
{
#define rol32(data,shift) (((data) << ((shift) & 31)) | ((data) >> ((32 - (shift)) & 31)))
   int fd;
   unsigned int seed;

   /* Setup a fallback seed based on quasi random. */
#ifdef SYSV
   seed = time(NULL);
#else
   seed = time(NULL) ^ gethostid();
#endif
   seed = rol32(seed, seed);

   fd = open("/dev/urandom", O_RDONLY);
   if (fd >= 0) {
       if (-1 == read(fd, &seed, sizeof(seed)))
	   warn("Failed reading entropy from /dev/urandom");
       close(fd);
   }

#ifdef SYSV
   srand48(seed);
#else
   srandom(seed);
#endif
}
Example #7
static void test_rol(void *p)
{
	/* rol16 */
	int_check(rol16(1, 1), 2);
	int_check(rol16(1, 15), 32768);
	int_check(rol16(0x8000, 1), 1);

	/* rol32 */
	int_check(rol32(1, 1), 2);
	int_check(rol32(0x80000000, 1), 1);

	/* rol64 */
	ull_check(rol64(1, 1), 2);
	ull_check(rol64(1, 63), 0x8000000000000000);
end:;
}
Example #8
static void sm3_expand(u32 *t, u32 *w, u32 *wt)
{
	int i;
	unsigned int tmp;

	/* load the input */
	for (i = 0; i <= 15; i++)
		w[i] = get_unaligned_be32((__u32 *)t + i);

	for (i = 16; i <= 67; i++) {
		tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15);
		w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6];
	}

	for (i = 0; i <= 63; i++)
		wt[i] = w[i] ^ w[i + 4];
}
Example #9
/*
 * ASCII case-insensitive (ie. A-Z) support for directories that was
 * used in IRIX.
 */
STATIC xfs_dahash_t
xfs_ascii_ci_hashname(
	struct xfs_name	*name)
{
	xfs_dahash_t	hash;
	int		i;

	for (i = 0, hash = 0; i < name->len; i++)
		hash = tolower(name->name[i]) ^ rol32(hash, 7);

	return hash;
}
Example #10
static inline void
buzhash_init (uint32_t *hash, const unsigned char *data, size_t len)
{
  size_t i;

  /**
   * Clang doesn't compute the right hash value if the last step gets pulled
   * into the loop (rol32(..., 0) is the problem).
   */
  *hash = 0ul;
  for (i = 1; i < len; i++, data++)
    *hash ^= rol32 (buzhash_table[(size_t) (*data)], len - i);
  *hash ^= buzhash_table[(size_t) (*data)];
}
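
Together with buzhash_update from Example #1 this gives a rolling window hash; a usage sketch (buzhash_table is the project's 256-entry random table; W and n are the assumed window and buffer sizes):

uint32_t h;
buzhash_init(&h, data, W);            /* hash of data[0] .. data[W-1] */
for (size_t i = 0; i + W < n; i++)
    buzhash_update(&h, data + i, W);  /* slide to data[i+1] .. data[i+W] */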
Example #11
/*
 * sha_transform: single block SHA1 transform
 *
 * @digest: 160 bit digest to update
 * @data:   512 bits of data to hash
 * @W:      80 words of workspace (see note)
 *
 * This function generates a SHA1 digest for a single 512-bit block.
 * Be warned: it does not handle padding or finalization, so do not
 * confuse it with the full FIPS 180-1 digest algorithm for
 * variable-length messages.
 *
 * Note: If the hash is security sensitive, the caller should be sure
 * to clear the workspace. This is left to the caller to avoid
 * unnecessary clears between chained hashing operations.
 */
void sha_transform(__u32 *digest, const char *in, __u32 *W)
{
	__u32 a, b, c, d, e, t, i;

	for (i = 0; i < 16; i++)
		W[i] = be32_to_cpu(((const __be32 *)in)[i]);

	for (i = 0; i < 64; i++)
		W[i+16] = rol32(W[i+13] ^ W[i+8] ^ W[i+2] ^ W[i], 1);

	a = digest[0];
	b = digest[1];
	c = digest[2];
	d = digest[3];
	e = digest[4];

	for (i = 0; i < 20; i++) {
		t = f1(b, c, d) + K1 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	for (; i < 40; i ++) {
		t = f2(b, c, d) + K2 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	for (; i < 60; i ++) {
		t = f3(b, c, d) + K3 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	for (; i < 80; i ++) {
		t = f2(b, c, d) + K4 + rol32(a, 5) + e + W[i];
		e = d; d = c; c = rol32(b, 30); b = a; a = t;
	}

	digest[0] += a;
	digest[1] += b;
	digest[2] += c;
	digest[3] += d;
	digest[4] += e;
}
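
A hedged usage sketch (block is assumed to point at the 64-byte input): the caller supplies the FIPS 180-1 initial values and the 80-word workspace, and clears the workspace afterwards when the hash is security sensitive, as the comment above requires:

__u32 digest[5] = {
	0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
};
__u32 W[80];

sha_transform(digest, block, W);  /* one 512-bit block */
memset(W, 0, sizeof(W));          /* clear the workspace */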
Example #12
static void sm3_compress(u32 *w, u32 *wt, u32 *m)
{
	u32 ss1;
	u32 ss2;
	u32 tt1;
	u32 tt2;
	u32 a, b, c, d, e, f, g, h;
	int i;

	a = m[0];
	b = m[1];
	c = m[2];
	d = m[3];
	e = m[4];
	f = m[5];
	g = m[6];
	h = m[7];

	for (i = 0; i <= 63; i++) {

		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);

		ss2 = ss1 ^ rol32(a, 12);

		tt1 = ff(i, a, b, c) + d + ss2 + *wt;
		wt++;

		tt2 = gg(i, e, f, g) + h + ss1 + *w;
		w++;

		d = c;
		c = rol32(b, 9);
		b = a;
		a = tt1;
		h = g;
		g = rol32(f, 19);
		f = e;
		e = p0(tt2);
	}

	m[0] = a ^ m[0];
	m[1] = b ^ m[1];
	m[2] = c ^ m[2];
	m[3] = d ^ m[3];
	m[4] = e ^ m[4];
	m[5] = f ^ m[5];
	m[6] = g ^ m[6];
	m[7] = h ^ m[7];

	a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0;
}
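
The round helpers t(), ff() and gg() are defined elsewhere; a sketch of definitions that follow the SM3 specification (p0 and p1 are the permutations shown as Examples #20 and #14):

static u32 t(int i)
{
	return (i < 16) ? 0x79cc4519 : 0x7a879d8a;
}

static u32 ff(int i, u32 x, u32 y, u32 z)
{
	return (i < 16) ? (x ^ y ^ z) : ((x & y) | (x & z) | (y & z));
}

static u32 gg(int i, u32 x, u32 y, u32 z)
{
	return (i < 16) ? (x ^ y ^ z) : ((x & y) | (~x & z));
}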
Example #13
/**
 * 
 *  rct2: 0x006A8B40
 */
void object_list_load()
{
	HANDLE hFindFile;
	WIN32_FIND_DATAA findFileData;
	int totalFiles = 0, totalFileSize = 0, fileDateModifiedChecksum = 0;

	char pluginPath[MAX_PATH];
	get_plugin_path(pluginPath);

	// Enumerate through each object in the directory
	hFindFile = FindFirstFile(RCT2_ADDRESS(RCT2_ADDRESS_OBJECT_DATA_PATH, char), &findFileData);
	if (hFindFile != INVALID_HANDLE_VALUE) {
		do {
			totalFiles++;
			totalFileSize += findFileData.nFileSizeLow;
			fileDateModifiedChecksum ^=
				findFileData.ftLastWriteTime.dwLowDateTime ^
				findFileData.ftLastWriteTime.dwHighDateTime;
			fileDateModifiedChecksum = ror32(fileDateModifiedChecksum, 5);
		} while (FindNextFile(hFindFile, &findFileData));
		FindClose(hFindFile);
	}

	totalFiles = ror32(totalFiles, 24);
	totalFiles = (totalFiles & ~0xFF) | 1;
	totalFiles = rol32(totalFiles, 24);

	// Read plugin header
	rct_plugin_header pluginHeader;

	FILE *file = fopen(pluginPath, "rb");
	if (file != NULL) {
		if (fread(&pluginHeader, sizeof(pluginHeader), 1, file) == 1) {
			// Check if object repository has changed in any way
			if (
				totalFiles == pluginHeader.total_files &&
				totalFileSize == pluginHeader.total_file_size &&
				fileDateModifiedChecksum == pluginHeader.date_modified_checksum
			) {
				// Dispose installed object list
				if (RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) != -1) {
					rct2_free(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*));
					RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) = -1;
				}

				// Read installed object list
				RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*) = rct2_malloc(pluginHeader.object_list_size);
				if (fread(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*), pluginHeader.object_list_size, 1, file) == 1) {
					RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32) = pluginHeader.object_list_no_items;

					fclose(file);
					sub_6A9FC0();
					object_list_examine();
					return;
				}
			}
		}
		fclose(file);
	}

	// Reload object list
	RCT2_GLOBAL(0x00F42B94, uint32) = totalFiles;
	RCT2_GLOBAL(0x00F42B98, uint32) = totalFileSize;
	RCT2_GLOBAL(0x00F42B9C, uint32) = fileDateModifiedChecksum;
	//RCT2_CALLPROC_EBPSAFE(0x006A8D8F);

	int eax = 3161;
	if (RCT2_GLOBAL(0x9AA00D, uint8) != 0){
		eax = 3160;
		RCT2_GLOBAL(0x9AA00D, uint8) = 0;
	}
	// File count removed and replaced by variable
	// RCT2_GLOBAL(0xF42BA8, uint32) = 0;
	uint32 file_count = 0;

	// Progress bar related.
	RCT2_GLOBAL(0xF42BD8, uint8) = 0;

	sub_6A9FC0();

	// Dispose installed object list
	if (RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) != -1) {
		rct2_free(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*));
		RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) = -1;
	}

	RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32) = 0;
	RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*) = rct2_malloc(4096);
	if (RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, int) == -1){
		RCT2_CALLPROC_X(0x006E3838, 0x343, 0xC5A, 0, 0, 0, 0, 0);
		return;
	}

	uint32 installed_buffer_size = 0x1000;
	uint32 current_item_offset = 0;

	hFindFile = FindFirstFile(RCT2_ADDRESS(RCT2_ADDRESS_OBJECT_DATA_PATH, char), &findFileData);
	if (hFindFile == INVALID_HANDLE_VALUE){
		// 6a92ea: This hasn't been implemented, but there isn't much point.
		// It would make an empty object file if no files were found.
		return;
	}

	for (uint8 first_time = 1; first_time || FindNextFile(hFindFile, &findFileData);){
		first_time = 0;

		RCT2_GLOBAL(0x9ABD98, HANDLE) = hFindFile;
		
		file_count++;
		// update progress bar. 
		eax = (file_count << 8) / ((RCT2_GLOBAL(0xF42B94, uint32) & 0xFFFFFF) + 1);



		if ((eax & 0xFF) != RCT2_GLOBAL(0xF42BD8, uint8)){
			RCT2_GLOBAL(0xF42BD8, uint8) = eax & 0xFF;
			// update progress bar
		}

		if ((installed_buffer_size - current_item_offset) <= 2842){
			installed_buffer_size += 0x1000;
			RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*) = rct2_realloc(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*), installed_buffer_size);
			if (RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, int) == -1){
				RCT2_CALLPROC_X(0x006E3838, 0x343, 0xC5A, 0, 0, 0, 0, 0);
				return;
			}
		}

		char path[260];
		subsitute_path(path, RCT2_ADDRESS(RCT2_ADDRESS_OBJECT_DATA_PATH, char), findFileData.cFileName);

		FILE *obj_file = fopen(path, "rb");
		if (obj_file == NULL){
			continue;
		}

		rct_object_entry* entry = RCT2_ADDRESS(0xF42B74, rct_object_entry);
		if (fread(entry, sizeof(rct_object_entry), 1, obj_file) != 1){
			fclose(obj_file);
			continue;
		}
		fclose(obj_file);

		RCT2_GLOBAL(0xF42BC4, uint32) = current_item_offset;

		uint8* installed_entry_pointer = RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, uint8*) + current_item_offset;

		memcpy(installed_entry_pointer, entry, sizeof(rct_object_entry));
		installed_entry_pointer += sizeof(rct_object_entry);

		strcpy(installed_entry_pointer, findFileData.cFileName);
		while (*installed_entry_pointer++);

		*((sint32*)installed_entry_pointer) = -1;
		*(installed_entry_pointer + 4) = 0;
		*((sint32*)(installed_entry_pointer + 5)) = 0;
		*((uint16*)(installed_entry_pointer + 9)) = 0;
		*((uint32*)(installed_entry_pointer + 11)) = 0;

		RCT2_GLOBAL(0x9ADAF0, uint32) = 0xF26E;

		RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32)++;

		// This is a variable used by object_load to decide if it should
		// use object_paint on the entry.
		RCT2_GLOBAL(0x9ADAFD, uint8) = 1;

		// Probably used by object paint.
		RCT2_GLOBAL(0x9ADAF4, uint32) = 0xF42BDB;

		int chunk_size;
		if (!object_load(-1, entry, &chunk_size)){
			RCT2_GLOBAL(0x9ADAF4, sint32) = -1;
			RCT2_GLOBAL(0x9ADAFD, uint8) = 0;
			RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32)--;
			continue;
		}
		// See above note
		RCT2_GLOBAL(0x9ADAF4, sint32) = -1;
		RCT2_GLOBAL(0x9ADAFD, uint8) = 0;

		if ((entry->flags & 0xF0) == 0x80){
			RCT2_GLOBAL(0xF42B70, uint32)++;
			if (RCT2_GLOBAL(0xF42B70, uint32) > 772){
				RCT2_GLOBAL(0xF42B70, uint32)--;
				RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32)--;
				continue;
			}
		}
		*((sint32*)installed_entry_pointer) = chunk_size;
		installed_entry_pointer += 4;

		uint8* chunk = RCT2_GLOBAL(RCT2_ADDRESS_CURR_OBJECT_CHUNK_POINTER, uint8*); // Loaded in object_load

		// When made of two parts, i.e. Wooden Roller Coaster (Dream Woodie Cars).
		if ((entry->flags & 0xF) == 0 && !(*((uint32*)(chunk + 8)) & 0x1000)){
			rct_string_id obj_string = chunk[12];
			if (obj_string == 0xFF){
				obj_string = chunk[13];
				if (obj_string == 0xFF){
					obj_string = chunk[14];
				}
			}

			obj_string += 2;
			format_string(installed_entry_pointer, obj_string, 0);
			strcat(installed_entry_pointer, "\t (");
			strcat(installed_entry_pointer, language_get_string(RCT2_GLOBAL(0xF42BBC, uint32)));
			strcat(installed_entry_pointer, ")");
			while (*installed_entry_pointer++);
		}
		else{
			strcpy(installed_entry_pointer, language_get_string(RCT2_GLOBAL(0xF42BBC, uint32)));
			while (*installed_entry_pointer++);
		}
		*((uint32*)installed_entry_pointer) = RCT2_GLOBAL(0x9ADAF0, uint32) - 0xF26E;
		installed_entry_pointer += 4;

		uint8* esi = RCT2_ADDRESS(0xF42BDB, uint8);
		int cl = *esi++;
		*installed_entry_pointer++ = cl;
		if (cl){
			memcpy(installed_entry_pointer, esi, cl*sizeof(rct_object_entry));
			installed_entry_pointer += cl*sizeof(rct_object_entry);
		}

		cl = *esi++;
		*installed_entry_pointer++ = cl;
		if (cl){
			memcpy(installed_entry_pointer, esi, cl*sizeof(rct_object_entry));
			installed_entry_pointer += cl*sizeof(rct_object_entry);
		}

		*((uint32*)installed_entry_pointer) = RCT2_GLOBAL(0xF433DD, uint32);
		installed_entry_pointer += 4;

		int size_of_object = installed_entry_pointer - RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, uint8*) - current_item_offset;

		object_unload(entry->flags & 0xF, (rct_object_entry_extended*)entry);

		// Return pointer to start of entry
		installed_entry_pointer -= size_of_object;

		uint8* copied_entry = RCT2_ADDRESS(0x140E9AC, uint8);

		size_of_object = object_copy(copied_entry, installed_entry_pointer);

		RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32)--;
		copied_entry += sizeof(rct_object_entry);
		// Skip filename
		while (*copied_entry++);

		// Skip chunk size
		copied_entry += 4;

		installed_entry_pointer = RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, uint8*);

		for (uint32 i = 0; i < RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32); ++i){

			uint8* temp_installed_entry = installed_entry_pointer;
			temp_installed_entry += sizeof(rct_object_entry);

			// Skip filename
			while (*temp_installed_entry++);

			// Skip chunk size
			temp_installed_entry += 4;

			if (strcmp(temp_installed_entry, copied_entry) <= 0)break;

			installed_entry_pointer = (uint8*)(object_get_next((rct_object_entry*)installed_entry_pointer));
		}

		// Difference to new location
		int no_bytes_to_move = RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, uint8*) + current_item_offset - installed_entry_pointer;

		uint8* curr_location = installed_entry_pointer;
		uint8* move_location = installed_entry_pointer + size_of_object;

		if (no_bytes_to_move){
			memmove(move_location, curr_location, no_bytes_to_move);
		}

		copied_entry = RCT2_ADDRESS(0x140E9AC, uint8);
		memcpy(installed_entry_pointer, copied_entry, size_of_object);
		current_item_offset += size_of_object;
		RCT2_GLOBAL(RCT2_ADDRESS_OBJECT_LIST_NO_ITEMS, uint32)++;
	}
Example #14
static inline u32 p1(u32 x)
{
	return x ^ rol32(x, 15) ^ rol32(x, 23);
}
Example #15
File: tci.c Project: 8tab/qemu
/* Interpret pseudo code in tb. */
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    tci_reg[TCG_AREG0] = (tcg_target_ulong)env;
    tci_reg[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);

    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        uint8_t *old_code_ptr = tb_ptr;
#endif
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif
        TCGMemOpIdx oi;

#if defined(GETPC)
        tci_tb_ptr = (uintptr_t)tb_ptr;
#endif

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_ri(&tb_ptr);
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0),
                                          tci_read_reg(TCG_REG_R1),
                                          tci_read_reg(TCG_REG_R2),
                                          tci_read_reg(TCG_REG_R3),
                                          tci_read_reg(TCG_REG_R5),
                                          tci_read_reg(TCG_REG_R6),
                                          tci_read_reg(TCG_REG_R7),
                                          tci_read_reg(TCG_REG_R8),
                                          tci_read_reg(TCG_REG_R9),
                                          tci_read_reg(TCG_REG_R10));
            tci_write_reg(TCG_REG_R0, tmp64);
            tci_write_reg(TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0),
                                          tci_read_reg(TCG_REG_R1),
                                          tci_read_reg(TCG_REG_R2),
                                          tci_read_reg(TCG_REG_R3),
                                          tci_read_reg(TCG_REG_R5));
            tci_write_reg(TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            label = tci_read_label(&tb_ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = (uint8_t *)label;
            continue;
        case INDEX_op_setcond_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg32(t0, tci_compare32(t1, t2, condition));
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            v64 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg32(t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg64(t0, tci_compare64(t1, t2, condition));
            break;
#endif
        case INDEX_op_mov_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
        case INDEX_op_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;

            /* Load/store operations (32 bit). */

        case INDEX_op_ld8u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
            break;
        case INDEX_op_ld8s_i32:
        case INDEX_op_ld16u_i32:
            TODO();
            break;
        case INDEX_op_ld16s_i32:
            TODO();
            break;
        case INDEX_op_ld_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
            break;
        case INDEX_op_st8_i32:
            t0 = tci_read_r8(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint8_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st16_i32:
            t0 = tci_read_r16(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint16_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st_i32:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_assert(t1 != sp_value || (int32_t)t2 < 0);
            *(uint32_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_add_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 + t2);
            break;
        case INDEX_op_sub_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 - t2);
            break;
        case INDEX_op_mul_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 * t2);
            break;
#if TCG_TARGET_HAS_div_i32
        case INDEX_op_div_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, (int32_t)t1 / (int32_t)t2);
            break;
        case INDEX_op_divu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 / t2);
            break;
        case INDEX_op_rem_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, (int32_t)t1 % (int32_t)t2);
            break;
        case INDEX_op_remu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 % t2);
            break;
#elif TCG_TARGET_HAS_div2_i32
        case INDEX_op_div2_i32:
        case INDEX_op_divu2_i32:
            TODO();
            break;
#endif
        case INDEX_op_and_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 & t2);
            break;
        case INDEX_op_or_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 | t2);
            break;
        case INDEX_op_xor_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 << (t2 & 31));
            break;
        case INDEX_op_shr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 >> (t2 & 31));
            break;
        case INDEX_op_sar_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, ((int32_t)t1 >> (t2 & 31)));
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, rol32(t1, t2 & 31));
            break;
        case INDEX_op_rotr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, ror32(t1, t2 & 31));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            t2 = tci_read_r32(&tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg32(t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
        case INDEX_op_brcond_i32:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_ri32(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            tmp64 += tci_read_r64(&tb_ptr);
            tci_write_reg64(t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            tmp64 -= tci_read_r64(&tb_ptr);
            tci_write_reg64(t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(&tb_ptr);
            v64 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_r32(&tb_ptr);
            tmp64 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t1, t0, t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32
        case INDEX_op_ext8s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r8s(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32
        case INDEX_op_ext16s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16s(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32
        case INDEX_op_ext8u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r8(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32
        case INDEX_op_ext16u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32
        case INDEX_op_bswap16_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg32(t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32
        case INDEX_op_bswap32_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i32
        case INDEX_op_not_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i32
        case INDEX_op_neg_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, -t1);
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_mov_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
        case INDEX_op_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;

            /* Load/store operations (64 bit). */

        case INDEX_op_ld8u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
            break;
        case INDEX_op_ld8s_i64:
        case INDEX_op_ld16u_i64:
        case INDEX_op_ld16s_i64:
            TODO();
            break;
        case INDEX_op_ld32u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
            break;
        case INDEX_op_ld32s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32s(t0, *(int32_t *)(t1 + t2));
            break;
        case INDEX_op_ld_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg64(t0, *(uint64_t *)(t1 + t2));
            break;
        case INDEX_op_st8_i64:
            t0 = tci_read_r8(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint8_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st16_i64:
            t0 = tci_read_r16(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint16_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st32_i64:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint32_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st_i64:
            t0 = tci_read_r64(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_assert(t1 != sp_value || (int32_t)t2 < 0);
            *(uint64_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_add_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 + t2);
            break;
        case INDEX_op_sub_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 - t2);
            break;
        case INDEX_op_mul_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 * t2);
            break;
#if TCG_TARGET_HAS_div_i64
        case INDEX_op_div_i64:
        case INDEX_op_divu_i64:
        case INDEX_op_rem_i64:
        case INDEX_op_remu_i64:
            TODO();
            break;
#elif TCG_TARGET_HAS_div2_i64
        case INDEX_op_div2_i64:
        case INDEX_op_divu2_i64:
            TODO();
            break;
#endif
        case INDEX_op_and_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 & t2);
            break;
        case INDEX_op_or_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 | t2);
            break;
        case INDEX_op_xor_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 << (t2 & 63));
            break;
        case INDEX_op_shr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 >> (t2 & 63));
            break;
        case INDEX_op_sar_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, ((int64_t)t1 >> (t2 & 63)));
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, rol64(t1, t2 & 63));
            break;
        case INDEX_op_rotr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, ror64(t1, t2 & 63));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            t2 = tci_read_r64(&tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg64(t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_r64(&tb_ptr);
            t1 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_HAS_ext8u_i64
        case INDEX_op_ext8u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r8(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i64
        case INDEX_op_ext8s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r8s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i64
        case INDEX_op_ext16s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r16s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i64
        case INDEX_op_ext16u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext32s_i64
        case INDEX_op_ext32s_i64:
#endif
        case INDEX_op_ext_i32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#if TCG_TARGET_HAS_ext32u_i64
        case INDEX_op_ext32u_i64:
#endif
        case INDEX_op_extu_i32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#if TCG_TARGET_HAS_bswap16_i64
        case INDEX_op_bswap16_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg64(t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i64
        case INDEX_op_bswap32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, bswap64(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i64
        case INDEX_op_not_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i64
        case INDEX_op_neg_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, -t1);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
            break;
        case INDEX_op_goto_tb:
            /* Jump address is aligned */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = atomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                tcg_abort();
            }
            tci_write_reg(t0, tmp32);
            break;
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(&tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                tcg_abort();
            }
            tci_write_reg(t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(t1, tmp64 >> 32);
            }
            break;
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_r(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                tcg_abort();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                tcg_abort();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            TODO();
            break;
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}
Example #16
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_trunc_shr_i32:
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return (TCGArg)ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return (TCGArg)rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return 0-x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return (TCGArg)h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return (TCGArg)h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined.  */
        return (int32_t)x / ((int32_t)y ? (int32_t)y : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? (uint32_t)y : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? (int64_t)y : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? (uint64_t)y : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? (int32_t)y : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? (uint32_t)y : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? (int64_t)y : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? (uint64_t)y : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
Example #17
static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
{
	u32 x[16];
	int i;

	memcpy(x, input, sizeof(x));
	for (i = 20; i > 0; i -= 2) {
		x[ 4] ^= rol32((x[ 0] + x[12]),  7);
		x[ 8] ^= rol32((x[ 4] + x[ 0]),  9);
		x[12] ^= rol32((x[ 8] + x[ 4]), 13);
		x[ 0] ^= rol32((x[12] + x[ 8]), 18);
		x[ 9] ^= rol32((x[ 5] + x[ 1]),  7);
		x[13] ^= rol32((x[ 9] + x[ 5]),  9);
		x[ 1] ^= rol32((x[13] + x[ 9]), 13);
		x[ 5] ^= rol32((x[ 1] + x[13]), 18);
		x[14] ^= rol32((x[10] + x[ 6]),  7);
		x[ 2] ^= rol32((x[14] + x[10]),  9);
		x[ 6] ^= rol32((x[ 2] + x[14]), 13);
		x[10] ^= rol32((x[ 6] + x[ 2]), 18);
		x[ 3] ^= rol32((x[15] + x[11]),  7);
		x[ 7] ^= rol32((x[ 3] + x[15]),  9);
		x[11] ^= rol32((x[ 7] + x[ 3]), 13);
		x[15] ^= rol32((x[11] + x[ 7]), 18);
		x[ 1] ^= rol32((x[ 0] + x[ 3]),  7);
		x[ 2] ^= rol32((x[ 1] + x[ 0]),  9);
		x[ 3] ^= rol32((x[ 2] + x[ 1]), 13);
		x[ 0] ^= rol32((x[ 3] + x[ 2]), 18);
		x[ 6] ^= rol32((x[ 5] + x[ 4]),  7);
		x[ 7] ^= rol32((x[ 6] + x[ 5]),  9);
		x[ 4] ^= rol32((x[ 7] + x[ 6]), 13);
		x[ 5] ^= rol32((x[ 4] + x[ 7]), 18);
		x[11] ^= rol32((x[10] + x[ 9]),  7);
		x[ 8] ^= rol32((x[11] + x[10]),  9);
		x[ 9] ^= rol32((x[ 8] + x[11]), 13);
		x[10] ^= rol32((x[ 9] + x[ 8]), 18);
		x[12] ^= rol32((x[15] + x[14]),  7);
		x[13] ^= rol32((x[12] + x[15]),  9);
		x[14] ^= rol32((x[13] + x[12]), 13);
		x[15] ^= rol32((x[14] + x[13]), 18);
	}
	for (i = 0; i < 16; ++i)
		x[i] += input[i];
	for (i = 0; i < 16; ++i)
		U32TO8_LITTLE(output + 4 * i, x[i]);
}
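
U32TO8_LITTLE is assumed to store a 32-bit word little-endian; a sketch:

#define U32TO8_LITTLE(p, v) do {	\
	(p)[0] = (u8)(v);		\
	(p)[1] = (u8)((v) >> 8);	\
	(p)[2] = (u8)((v) >> 16);	\
	(p)[3] = (u8)((v) >> 24);	\
} while (0)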
Example #18

static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
{
	u32 dw, tl, tr;
	u32 kw4l, kw4r;

	/* absorb kw2 to other subkeys */
	/* round 2 */
	subL[3] ^= subL[1]; subR[3] ^= subR[1];
	/* round 4 */
	subL[5] ^= subL[1]; subR[5] ^= subR[1];
	/* round 6 */
	subL[7] ^= subL[1]; subR[7] ^= subR[1];
	subL[1] ^= subR[1] & ~subR[9];
	dw = subL[1] & subL[9],
		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
	/* round 8 */
	subL[11] ^= subL[1]; subR[11] ^= subR[1];
	/* round 10 */
	subL[13] ^= subL[1]; subR[13] ^= subR[1];
	/* round 12 */
	subL[15] ^= subL[1]; subR[15] ^= subR[1];
	subL[1] ^= subR[1] & ~subR[17];
	dw = subL[1] & subL[17],
		subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
	/* round 14 */
	subL[19] ^= subL[1]; subR[19] ^= subR[1];
	/* round 16 */
	subL[21] ^= subL[1]; subR[21] ^= subR[1];
	/* round 18 */
	subL[23] ^= subL[1]; subR[23] ^= subR[1];
	if (max == 24) {
		/* kw3 */
		subL[24] ^= subL[1]; subR[24] ^= subR[1];

	/* absorb kw4 to other subkeys */
		kw4l = subL[25]; kw4r = subR[25];
	} else {
		subL[1] ^= subR[1] & ~subR[25];
		dw = subL[1] & subL[25],
			subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
		/* round 20 */
		subL[27] ^= subL[1]; subR[27] ^= subR[1];
		/* round 22 */
		subL[29] ^= subL[1]; subR[29] ^= subR[1];
		/* round 24 */
		subL[31] ^= subL[1]; subR[31] ^= subR[1];
		/* kw3 */
		subL[32] ^= subL[1]; subR[32] ^= subR[1];

	/* absorb kw4 to other subkeys */
		kw4l = subL[33]; kw4r = subR[33];
		/* round 23 */
		subL[30] ^= kw4l; subR[30] ^= kw4r;
		/* round 21 */
		subL[28] ^= kw4l; subR[28] ^= kw4r;
		/* round 19 */
		subL[26] ^= kw4l; subR[26] ^= kw4r;
		kw4l ^= kw4r & ~subR[24];
		dw = kw4l & subL[24],
			kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
	}
	/* round 17 */
	subL[22] ^= kw4l; subR[22] ^= kw4r;
	/* round 15 */
	subL[20] ^= kw4l; subR[20] ^= kw4r;
	/* round 13 */
	subL[18] ^= kw4l; subR[18] ^= kw4r;
	kw4l ^= kw4r & ~subR[16];
	dw = kw4l & subL[16],
		kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
	/* round 11 */
	subL[14] ^= kw4l; subR[14] ^= kw4r;
	/* round 9 */
	subL[12] ^= kw4l; subR[12] ^= kw4r;
	/* round 7 */
	subL[10] ^= kw4l; subR[10] ^= kw4r;
	kw4l ^= kw4r & ~subR[8];
	dw = kw4l & subL[8],
		kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
	/* round 5 */
	subL[6] ^= kw4l; subR[6] ^= kw4r;
	/* round 3 */
	subL[4] ^= kw4l; subR[4] ^= kw4r;
	/* round 1 */
	subL[2] ^= kw4l; subR[2] ^= kw4r;
	/* kw1 */
	subL[0] ^= kw4l; subR[0] ^= kw4r;

	/* key XOR is end of F-function */
	SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */
	SUBKEY_R(0) = subR[0] ^ subR[2];
	SUBKEY_L(2) = subL[3];       /* round 1 */
	SUBKEY_R(2) = subR[3];
	SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */
	SUBKEY_R(3) = subR[2] ^ subR[4];
	SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */
	SUBKEY_R(4) = subR[3] ^ subR[5];
	SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */
	SUBKEY_R(5) = subR[4] ^ subR[6];
	SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
	SUBKEY_R(6) = subR[5] ^ subR[7];
	tl = subL[10] ^ (subR[10] & ~subR[8]);
	dw = tl & subL[8],  /* FL(kl1) */
		tr = subR[10] ^ rol32(dw, 1);
	SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
	SUBKEY_R(7) = subR[6] ^ tr;
	SUBKEY_L(8) = subL[8];       /* FL(kl1) */
	SUBKEY_R(8) = subR[8];
	SUBKEY_L(9) = subL[9];       /* FLinv(kl2) */
	SUBKEY_R(9) = subR[9];
	tl = subL[7] ^ (subR[7] & ~subR[9]);
	dw = tl & subL[9],  /* FLinv(kl2) */
		tr = subR[7] ^ rol32(dw, 1);
	SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
	SUBKEY_R(10) = tr ^ subR[11];
	SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
	SUBKEY_R(11) = subR[10] ^ subR[12];
	SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */
	SUBKEY_R(12) = subR[11] ^ subR[13];
	SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */
	SUBKEY_R(13) = subR[12] ^ subR[14];
	SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
	SUBKEY_R(14) = subR[13] ^ subR[15];
	tl = subL[18] ^ (subR[18] & ~subR[16]);
	dw = tl & subL[16], /* FL(kl3) */
		tr = subR[18] ^ rol32(dw, 1);
	SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
	SUBKEY_R(15) = subR[14] ^ tr;
	SUBKEY_L(16) = subL[16];     /* FL(kl3) */
	SUBKEY_R(16) = subR[16];
	SUBKEY_L(17) = subL[17];     /* FLinv(kl4) */
	SUBKEY_R(17) = subR[17];
	tl = subL[15] ^ (subR[15] & ~subR[17]);
	dw = tl & subL[17], /* FLinv(kl4) */
		tr = subR[15] ^ rol32(dw, 1);
	SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
	SUBKEY_R(18) = tr ^ subR[19];
	SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
	SUBKEY_R(19) = subR[18] ^ subR[20];
	SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */
	SUBKEY_R(20) = subR[19] ^ subR[21];
	SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */
	SUBKEY_R(21) = subR[20] ^ subR[22];
	SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */
	SUBKEY_R(22) = subR[21] ^ subR[23];
	if (max == 24) {
		SUBKEY_L(23) = subL[22];     /* round 18 */
		SUBKEY_R(23) = subR[22];
		SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */
		SUBKEY_R(24) = subR[24] ^ subR[23];
	} else {
		tl = subL[26] ^ (subR[26] & ~subR[24]);
		dw = tl & subL[24], /* FL(kl5) */
			tr = subR[26] ^ rol32(dw, 1);
		SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
		SUBKEY_R(23) = subR[22] ^ tr;
		SUBKEY_L(24) = subL[24];     /* FL(kl5) */
		SUBKEY_R(24) = subR[24];
		SUBKEY_L(25) = subL[25];     /* FLinv(kl6) */
		SUBKEY_R(25) = subR[25];
		tl = subL[23] ^ (subR[23] & ~subR[25]);
		dw = tl & subL[25], /* FLinv(kl6) */
			tr = subR[23] ^ rol32(dw, 1);
		SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
		SUBKEY_R(26) = tr ^ subR[27];
		SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
		SUBKEY_R(27) = subR[26] ^ subR[28];
		SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */
		SUBKEY_R(28) = subR[27] ^ subR[29];
		SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */
		SUBKEY_R(29) = subR[28] ^ subR[30];
		SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */
		SUBKEY_R(30) = subR[29] ^ subR[31];
		SUBKEY_L(31) = subL[30];     /* round 24 */
		SUBKEY_R(31) = subR[30];
		SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */
		SUBKEY_R(32) = subR[32] ^ subR[31];
	}
}
Example #19
static UINT32
SMS4_key_transform(UINT32 x)
{
  UINT32 t = SMS4_sbox_sub(x);
  return t^rol32(t,13)^rol32(t,23);
}
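
This is the key-schedule variant (usually written L') of the SM4 linear transform in Example #3; the two differ only in their rotation amounts, 13/23 here versus 2/10/18/24 in the round function.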
Example #20
static inline u32 p0(u32 x)
{
	return x ^ rol32(x, 9) ^ rol32(x, 17);
}
Example #21
/* The RIPEMD160 compression function. */
static inline void rmd160_compress(struct rmd160_ctx *ctx, uint32_t *buf)
{
    uint8_t w, round;
    uint32_t T;
    uint32_t AL, BL, CL, DL, EL;    /* left line */
    uint32_t AR, BR, CR, DR, ER;    /* right line */
    uint32_t X[16];

    /* Byte-swap the buffer if we're on a big-endian machine */
    cpu_to_le32_array(X, buf, 16);

    /* Load the left and right lines with the initial state */
    AL = AR = ctx->h[0];
    BL = BR = ctx->h[1];
    CL = CR = ctx->h[2];
    DL = DR = ctx->h[3];
    EL = ER = ctx->h[4];

    /* Round 1 */
    round = 0;
    for (w = 0; w < 16; w++) { /* left line */
        T = rol32(AL + F1(BL, CL, DL) + X[RL[round][w]] + KL[round], SL[round][w]) + EL;
        AL = EL; EL = DL; DL = rol32(CL, 10); CL = BL; BL = T;
    }
    for (w = 0; w < 16; w++) { /* right line */
        T = rol32(AR + F5(BR, CR, DR) + X[RR[round][w]] + KR[round], SR[round][w]) + ER;
        AR = ER; ER = DR; DR = rol32(CR, 10); CR = BR; BR = T;
    }

    /* Round 2 */
    round++;
    for (w = 0; w < 16; w++) { /* left line */
        T = rol32(AL + F2(BL, CL, DL) + X[RL[round][w]] + KL[round], SL[round][w]) + EL;
        AL = EL; EL = DL; DL = rol32(CL, 10); CL = BL; BL = T;
    }
    for (w = 0; w < 16; w++) { /* right line */
        T = rol32(AR + F4(BR, CR, DR) + X[RR[round][w]] + KR[round], SR[round][w]) + ER;
        AR = ER; ER = DR; DR = rol32(CR, 10); CR = BR; BR = T;
    }

    /* Round 3 */
    round++;
    for (w = 0; w < 16; w++) { /* left line */
        T = rol32(AL + F3(BL, CL, DL) + X[RL[round][w]] + KL[round], SL[round][w]) + EL;
        AL = EL; EL = DL; DL = rol32(CL, 10); CL = BL; BL = T;
    }
    for (w = 0; w < 16; w++) { /* right line */
        T = rol32(AR + F3(BR, CR, DR) + X[RR[round][w]] + KR[round], SR[round][w]) + ER;
        AR = ER; ER = DR; DR = rol32(CR, 10); CR = BR; BR = T;
    }

    /* Round 4 */
    round++;
    for (w = 0; w < 16; w++) { /* left line */
        T = rol32(AL + F4(BL, CL, DL) + X[RL[round][w]] + KL[round], SL[round][w]) + EL;
        AL = EL; EL = DL; DL = rol32(CL, 10); CL = BL; BL = T;
    }
    for (w = 0; w < 16; w++) { /* right line */
        T = rol32(AR + F2(BR, CR, DR) + X[RR[round][w]] + KR[round], SR[round][w]) + ER;
        AR = ER; ER = DR; DR = rol32(CR, 10); CR = BR; BR = T;
    }

    /* Round 5 */
    round++;
    for (w = 0; w < 16; w++) { /* left line */
        T = rol32(AL + F5(BL, CL, DL) + X[RL[round][w]] + KL[round], SL[round][w]) + EL;
        AL = EL; EL = DL; DL = rol32(CL, 10); CL = BL; BL = T;
    }
    for (w = 0; w < 16; w++) { /* right line */
        T = rol32(AR + F1(BR, CR, DR) + X[RR[round][w]] + KR[round], SR[round][w]) + ER;
        AR = ER; ER = DR; DR = rol32(CR, 10); CR = BR; BR = T;
    }

    /* Final mixing stage */
    T = ctx->h[1] + CL + DR;
    ctx->h[1] = ctx->h[2] + DL + ER;
    ctx->h[2] = ctx->h[3] + EL + AR;
    ctx->h[3] = ctx->h[4] + AL + BR;
    ctx->h[4] = ctx->h[0] + BL + CR;
    ctx->h[0] = T;
}
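
F1 through F5 are the standard RIPEMD-160 selection functions; a sketch of plausible definitions:

#define F1(x, y, z) ((x) ^ (y) ^ (z))
#define F2(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define F3(x, y, z) (((x) | ~(y)) ^ (z))
#define F4(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define F5(x, y, z) ((x) ^ ((y) | ~(z)))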
Example #22
static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			    unsigned int key_len)
{
	/*
	 * The AES key schedule round constants
	 */
	static u8 const rcon[] = {
		0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36,
	};

	u32 kwords = key_len / sizeof(u32);
	struct aes_block *key_enc, *key_dec;
	int i, j;

	if (key_len != AES_KEYSIZE_128 &&
	    key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key_enc, in_key, key_len);
	ctx->key_length = key_len;

	kernel_neon_begin();
	for (i = 0; i < sizeof(rcon); i++) {
		u32 *rki = ctx->key_enc + (i * kwords);
		u32 *rko = rki + kwords;

#ifndef CONFIG_CPU_BIG_ENDIAN
		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
#else
		rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
		rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
#endif
		rko[1] = rko[0] ^ rki[1];
		rko[2] = rko[1] ^ rki[2];
		rko[3] = rko[2] ^ rki[3];

		if (key_len == AES_KEYSIZE_192) {
			if (i >= 7)
				break;
			rko[4] = rko[3] ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
		} else if (key_len == AES_KEYSIZE_256) {
			if (i >= 6)
				break;
			rko[4] = ce_aes_sub(rko[3]) ^ rki[4];
			rko[5] = rko[4] ^ rki[5];
			rko[6] = rko[5] ^ rki[6];
			rko[7] = rko[6] ^ rki[7];
		}
	}

	/*
	 * Generate the decryption keys for the Equivalent Inverse Cipher.
	 * This involves reversing the order of the round keys, and applying
	 * the Inverse Mix Columns transformation on all but the first and
	 * the last one.
	 */
	key_enc = (struct aes_block *)ctx->key_enc;
	key_dec = (struct aes_block *)ctx->key_dec;
	j = num_rounds(ctx);

	key_dec[0] = key_enc[j];
	for (i = 1, j--; j > 0; i++, j--)
		ce_aes_invert(key_dec + i, key_enc + j);
	key_dec[i] = key_enc[0];

	kernel_neon_end();
	return 0;
}
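The #ifdef around ce_aes_sub() is only byte-order bookkeeping: both branches compute the standard RotWord/SubWord/Rcon step of the AES key schedule. For comparison, a plain-C sketch of one AES-128 expansion round in the FIPS-197 convention (words hold their bytes most-significant first, so RotWord is a left rotate by 8 and Rcon lands in the top byte) might read as follows; aes_sbox is an assumed external copy of the standard forward S-box:

#include <stdint.h>

extern const uint8_t aes_sbox[256];	/* assumed: standard AES forward S-box */

/* SubWord: apply the S-box to each byte of a word (FIPS-197 order). */
static uint32_t sub_word(uint32_t w)
{
	return ((uint32_t)aes_sbox[(w >> 24) & 0xff] << 24) |
	       ((uint32_t)aes_sbox[(w >> 16) & 0xff] << 16) |
	       ((uint32_t)aes_sbox[(w >>  8) & 0xff] <<  8) |
	        (uint32_t)aes_sbox[ w        & 0xff];
}

/* One AES-128 key-schedule round: RotWord + SubWord + Rcon on the
 * first word, then XOR-chain the remaining three. */
static void aes128_expand_round(const uint32_t rki[4], uint32_t rko[4],
				uint8_t rcon)
{
	uint32_t t = (rki[3] << 8) | (rki[3] >> 24);	/* RotWord */

	rko[0] = sub_word(t) ^ ((uint32_t)rcon << 24) ^ rki[0];
	rko[1] = rko[0] ^ rki[1];
	rko[2] = rko[1] ^ rki[2];
	rko[3] = rko[2] ^ rki[3];
}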
Exemplo n.º 23
0
static void nwsign(char *r_data1, char *r_data2, char *outdata)
{
	int i;
	unsigned int w0, w1, w2, w3;
	static int rbit[4] = { 0, 2, 1, 3 };
#ifdef __i386__
	unsigned int *data2 = (unsigned int *) r_data2;
#else
	unsigned int data2[16];

	for (i = 0; i < 16; i++)
		data2[i] = GET_LE32(r_data2 + (i << 2));
#endif
	w0 = GET_LE32(r_data1);
	w1 = GET_LE32(r_data1 + 4);
	w2 = GET_LE32(r_data1 + 8);
	w3 = GET_LE32(r_data1 + 12);

	/* Round 1: F(x,y,z) = (x & y) | (~x & z) */
	for (i = 0; i < 16; i += 4) {
		w0 = rol32(w0 + ((w1 & w2) | ((~w1) & w3)) + data2[i + 0], 3);
		w3 = rol32(w3 + ((w0 & w1) | ((~w0) & w2)) + data2[i + 1], 7);
		w2 = rol32(w2 + ((w3 & w0) | ((~w3) & w1)) + data2[i + 2], 11);
		w1 = rol32(w1 + ((w2 & w3) | ((~w2) & w0)) + data2[i + 3], 19);
	}
	/* Round 2: G(x,y,z) = (x & y) | (x & z) | (y & z) */
	for (i = 0; i < 4; i++) {
		w0 = rol32(w0 + (((w2 | w3) & w1) | (w2 & w3)) + 0x5a827999 + data2[i + 0], 3);
		w3 = rol32(w3 + (((w1 | w2) & w0) | (w1 & w2)) + 0x5a827999 + data2[i + 4], 5);
		w2 = rol32(w2 + (((w0 | w1) & w3) | (w0 & w1)) + 0x5a827999 + data2[i + 8], 9);
		w1 = rol32(w1 + (((w3 | w0) & w2) | (w3 & w0)) + 0x5a827999 + data2[i + 12], 13);
	}
	/* Round 3: H(x,y,z) = x ^ y ^ z, words taken in bit-reversed order */
	for (i = 0; i < 4; i++) {
		w0 = rol32(w0 + ((w1 ^ w2) ^ w3) + 0x6ed9eba1 + data2[rbit[i] + 0], 3);
		w3 = rol32(w3 + ((w0 ^ w1) ^ w2) + 0x6ed9eba1 + data2[rbit[i] + 8], 9);
		w2 = rol32(w2 + ((w3 ^ w0) ^ w1) + 0x6ed9eba1 + data2[rbit[i] + 4], 11);
		w1 = rol32(w1 + ((w2 ^ w3) ^ w0) + 0x6ed9eba1 + data2[rbit[i] + 12], 15);
	}
	PUT_LE32(outdata, (w0 + GET_LE32(r_data1)) & 0xffffffff);
	PUT_LE32(outdata + 4, (w1 + GET_LE32(r_data1 + 4)) & 0xffffffff);
	PUT_LE32(outdata + 8, (w2 + GET_LE32(r_data1 + 8)) & 0xffffffff);
	PUT_LE32(outdata + 12, (w3 + GET_LE32(r_data1 + 12)) & 0xffffffff);
}
Exemplo n.º 24
0
/**
 * rct2: 0x006A8B40
 */
void object_list_load()
{
	HANDLE hFindFile;
	WIN32_FIND_DATAA findFileData;
	int totalFiles = 0, totalFileSize = 0, fileDateModifiedChecksum = 0;

	// Enumerate each object file in the directory
	hFindFile = FindFirstFile(RCT2_ADDRESS(RCT2_ADDRESS_OBJECT_DATA_PATH, char), &findFileData);
	if (hFindFile != INVALID_HANDLE_VALUE) {
		do {
			totalFiles++;
			totalFileSize += findFileData.nFileSizeLow;
			fileDateModifiedChecksum ^=
				findFileData.ftLastWriteTime.dwLowDateTime ^
				findFileData.ftLastWriteTime.dwHighDateTime;
			fileDateModifiedChecksum = ror32(fileDateModifiedChecksum, 5);
		} while (FindNextFile(hFindFile, &findFileData));
		FindClose(hFindFile);
	}

	// Rotate the high byte of the count down, force it to 1, then
	// rotate back; i.e. set the top byte of totalFiles to 1
	totalFiles = ror32(totalFiles, 24);
	totalFiles = (totalFiles & ~0xFF) | 1;
	totalFiles = rol32(totalFiles, 24);

	// Read plugin header
	rct_plugin_header pluginHeader;
	FILE *file = fopen(get_file_path(PATH_ID_PLUGIN), "rb");
	if (file != NULL) {
		if (fread(&pluginHeader, sizeof(pluginHeader), 1, file) == 1) {
			// Check if the object repository has changed in any way
			if (
				totalFiles == pluginHeader.total_files &&
				totalFileSize == pluginHeader.total_file_size &&
				fileDateModifiedChecksum == pluginHeader.date_modified_checksum
			) {
				// Dispose installed object list
				if (RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) != -1) {
					rct2_free(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*));
					RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, sint32) = -1;
				}

				// Read installed object list
				RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*) = rct2_malloc(pluginHeader.object_list_size);
				if (fread(RCT2_GLOBAL(RCT2_ADDRESS_INSTALLED_OBJECT_LIST, void*), pluginHeader.object_list_size, 1, file) == 1) {
					RCT2_GLOBAL(0x00F42B6C, uint32) = pluginHeader.var_10;

					fclose(file);
					RCT2_CALLPROC_EBPSAFE(0x006A9FC0);
					object_list_examine();
					return;
				}
			}
		}
		fclose(file);
	}

	// Reload object list
	RCT2_GLOBAL(0x00F42B94, uint32) = totalFiles;
	RCT2_GLOBAL(0x00F42B98, uint32) = totalFileSize;
	RCT2_GLOBAL(0x00F42B9C, uint32) = fileDateModifiedChecksum;
	RCT2_CALLPROC_EBPSAFE(0x006A8D8F);
}
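The rotate dance on totalFiles above is a roundabout way of stamping a version byte: rotating right by 24 brings the top byte down, the OR forces it to 1, and rotating left by 24 puts it back. An equivalent direct form, shown only to make the intent clear, is:

/* Same effect as the ror32/OR/rol32 sequence: keep the low 24 bits
 * of the file count and force the top byte to 0x01. */
totalFiles = (totalFiles & 0x00FFFFFF) | 0x01000000;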
Exemplo n.º 25
0
/* Interpret pseudo code in tb. */
tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
{
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    tcg_target_ulong next_tb = 0;

    tci_reg[TCG_AREG0] = (tcg_target_ulong)env;
    tci_reg[TCG_REG_CALL_STACK] = sp_value;
    assert(tb_ptr);

    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        uint8_t *old_code_ptr = tb_ptr;
#endif
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
#ifndef CONFIG_SOFTMMU
        tcg_target_ulong host_addr;
#endif
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif

#if defined(GETPC)
        tci_tb_ptr = (uintptr_t)tb_ptr;
#endif

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_end:
        case INDEX_op_nop:
            break;
        case INDEX_op_nop1:
        case INDEX_op_nop2:
        case INDEX_op_nop3:
        case INDEX_op_nopn:
        case INDEX_op_discard:
            TODO();
            break;
        case INDEX_op_set_label:
            TODO();
            break;
        case INDEX_op_call:
            t0 = tci_read_ri(&tb_ptr);
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0),
                                          tci_read_reg(TCG_REG_R1),
                                          tci_read_reg(TCG_REG_R2),
                                          tci_read_reg(TCG_REG_R3),
                                          tci_read_reg(TCG_REG_R5),
                                          tci_read_reg(TCG_REG_R6),
                                          tci_read_reg(TCG_REG_R7),
                                          tci_read_reg(TCG_REG_R8),
                                          tci_read_reg(TCG_REG_R9),
                                          tci_read_reg(TCG_REG_R10));
            tci_write_reg(TCG_REG_R0, tmp64);
            tci_write_reg(TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(TCG_REG_R0),
                                          tci_read_reg(TCG_REG_R1),
                                          tci_read_reg(TCG_REG_R2),
                                          tci_read_reg(TCG_REG_R3),
                                          tci_read_reg(TCG_REG_R5));
            tci_write_reg(TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            label = tci_read_label(&tb_ptr);
            assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = (uint8_t *)label;
            continue;
        case INDEX_op_setcond_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg32(t0, tci_compare32(t1, t2, condition));
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            v64 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg32(t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg64(t0, tci_compare64(t1, t2, condition));
            break;
#endif
        case INDEX_op_mov_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
        case INDEX_op_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;

            /* Load/store operations (32 bit). */

        case INDEX_op_ld8u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
            break;
        case INDEX_op_ld8s_i32:
        case INDEX_op_ld16u_i32:
            TODO();
            break;
        case INDEX_op_ld16s_i32:
            TODO();
            break;
        case INDEX_op_ld_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
            break;
        case INDEX_op_st8_i32:
            t0 = tci_read_r8(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint8_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st16_i32:
            t0 = tci_read_r16(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint16_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st_i32:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            assert(t1 != sp_value || (int32_t)t2 < 0);
            *(uint32_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_add_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 + t2);
            break;
        case INDEX_op_sub_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 - t2);
            break;
        case INDEX_op_mul_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 * t2);
            break;
#if TCG_TARGET_HAS_div_i32
        case INDEX_op_div_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, (int32_t)t1 / (int32_t)t2);
            break;
        case INDEX_op_divu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 / t2);
            break;
        case INDEX_op_rem_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, (int32_t)t1 % (int32_t)t2);
            break;
        case INDEX_op_remu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 % t2);
            break;
#elif TCG_TARGET_HAS_div2_i32
        case INDEX_op_div2_i32:
        case INDEX_op_divu2_i32:
            TODO();
            break;
#endif
        case INDEX_op_and_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 & t2);
            break;
        case INDEX_op_or_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 | t2);
            break;
        case INDEX_op_xor_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 << t2);
            break;
        case INDEX_op_shr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, t1 >> t2);
            break;
        case INDEX_op_sar_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, ((int32_t)t1 >> t2));
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, rol32(t1, t2));
            break;
        case INDEX_op_rotr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_ri32(&tb_ptr);
            t2 = tci_read_ri32(&tb_ptr);
            tci_write_reg32(t0, ror32(t1, t2));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            t2 = tci_read_r32(&tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg32(t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
        case INDEX_op_brcond_i32:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_ri32(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            tmp64 += tci_read_r64(&tb_ptr);
            tci_write_reg64(t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(&tb_ptr);
            tmp64 -= tci_read_r64(&tb_ptr);
            tci_write_reg64(t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(&tb_ptr);
            v64 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_r32(&tb_ptr);
            tmp64 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t1, t0, t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32
        case INDEX_op_ext8s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r8s(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32
        case INDEX_op_ext16s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16s(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32
        case INDEX_op_ext8u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r8(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32
        case INDEX_op_ext16u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg32(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32
        case INDEX_op_bswap16_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg32(t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32
        case INDEX_op_bswap32_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i32
        case INDEX_op_not_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i32
        case INDEX_op_neg_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg32(t0, -t1);
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_mov_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
        case INDEX_op_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;

            /* Load/store operations (64 bit). */

        case INDEX_op_ld8u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg8(t0, *(uint8_t *)(t1 + t2));
            break;
        case INDEX_op_ld8s_i64:
        case INDEX_op_ld16u_i64:
        case INDEX_op_ld16s_i64:
            TODO();
            break;
        case INDEX_op_ld32u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32(t0, *(uint32_t *)(t1 + t2));
            break;
        case INDEX_op_ld32s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg32s(t0, *(int32_t *)(t1 + t2));
            break;
        case INDEX_op_ld_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg64(t0, *(uint64_t *)(t1 + t2));
            break;
        case INDEX_op_st8_i64:
            t0 = tci_read_r8(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint8_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st16_i64:
            t0 = tci_read_r16(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint16_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st32_i64:
            t0 = tci_read_r32(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint32_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st_i64:
            t0 = tci_read_r64(&tb_ptr);
            t1 = tci_read_r(&tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            assert(t1 != sp_value || (int32_t)t2 < 0);
            *(uint64_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_add_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 + t2);
            break;
        case INDEX_op_sub_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 - t2);
            break;
        case INDEX_op_mul_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 * t2);
            break;
#if TCG_TARGET_HAS_div_i64
        case INDEX_op_div_i64:
        case INDEX_op_divu_i64:
        case INDEX_op_rem_i64:
        case INDEX_op_remu_i64:
            TODO();
            break;
#elif TCG_TARGET_HAS_div2_i64
        case INDEX_op_div2_i64:
        case INDEX_op_divu2_i64:
            TODO();
            break;
#endif
        case INDEX_op_and_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 & t2);
            break;
        case INDEX_op_or_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 | t2);
            break;
        case INDEX_op_xor_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 << t2);
            break;
        case INDEX_op_shr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, t1 >> t2);
            break;
        case INDEX_op_sar_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, ((int64_t)t1 >> t2));
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, rol64(t1, t2));
            break;
        case INDEX_op_rotr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_ri64(&tb_ptr);
            t2 = tci_read_ri64(&tb_ptr);
            tci_write_reg64(t0, ror64(t1, t2));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            t2 = tci_read_r64(&tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg64(t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_r64(&tb_ptr);
            t1 = tci_read_ri64(&tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_HAS_ext8u_i64
        case INDEX_op_ext8u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r8(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i64
        case INDEX_op_ext8s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r8s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i64
        case INDEX_op_ext16s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r16s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i64
        case INDEX_op_ext16u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext32s_i64
        case INDEX_op_ext32s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32s(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_ext32u_i64
        case INDEX_op_ext32u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t0, t1);
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i64
        case INDEX_op_bswap16_i64:
            TODO();
            t0 = *tb_ptr++;
            t1 = tci_read_r16(&tb_ptr);
            tci_write_reg64(t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i64
        case INDEX_op_bswap32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32(&tb_ptr);
            tci_write_reg64(t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, bswap64(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i64
        case INDEX_op_not_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i64
        case INDEX_op_neg_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(&tb_ptr);
            tci_write_reg64(t0, -t1);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        case INDEX_op_debug_insn_start:
            TODO();
            break;
#else
        case INDEX_op_debug_insn_start:
            TODO();
            break;
#endif
        case INDEX_op_exit_tb:
            next_tb = *(uint64_t *)tb_ptr;
            goto exit;
            break;
        case INDEX_op_goto_tb:
            t0 = tci_read_i32(&tb_ptr);
            assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
        case INDEX_op_qemu_ld8u:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp8 = *(uint8_t *)(host_addr + GUEST_BASE);
#endif
            tci_write_reg8(t0, tmp8);
            break;
        case INDEX_op_qemu_ld8s:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp8 = helper_ldb_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp8 = *(uint8_t *)(host_addr + GUEST_BASE);
#endif
            tci_write_reg8s(t0, tmp8);
            break;
        case INDEX_op_qemu_ld16u:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg16(t0, tmp16);
            break;
        case INDEX_op_qemu_ld16s:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp16 = helper_ldw_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp16 = tswap16(*(uint16_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg16s(t0, tmp16);
            break;
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_qemu_ld32u:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg32(t0, tmp32);
            break;
        case INDEX_op_qemu_ld32s:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg32s(t0, tmp32);
            break;
#endif /* TCG_TARGET_REG_BITS == 64 */
        case INDEX_op_qemu_ld32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp32 = helper_ldl_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp32 = tswap32(*(uint32_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg32(t0, tmp32);
            break;
        case INDEX_op_qemu_ld64:
            t0 = *tb_ptr++;
#if TCG_TARGET_REG_BITS == 32
            t1 = *tb_ptr++;
#endif
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            tmp64 = helper_ldq_mmu(env, taddr, tci_read_i(&tb_ptr));
#else
            host_addr = (tcg_target_ulong)taddr;
            tmp64 = tswap64(*(uint64_t *)(host_addr + GUEST_BASE));
#endif
            tci_write_reg(t0, tmp64);
#if TCG_TARGET_REG_BITS == 32
            tci_write_reg(t1, tmp64 >> 32);
#endif
            break;
        case INDEX_op_qemu_st8:
            t0 = tci_read_r8(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            t2 = tci_read_i(&tb_ptr);
            helper_stb_mmu(env, taddr, t0, t2);
#else
            host_addr = (tcg_target_ulong)taddr;
            *(uint8_t *)(host_addr + GUEST_BASE) = t0;
#endif
            break;
        case INDEX_op_qemu_st16:
            t0 = tci_read_r16(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            t2 = tci_read_i(&tb_ptr);
            helper_stw_mmu(env, taddr, t0, t2);
#else
            host_addr = (tcg_target_ulong)taddr;
            *(uint16_t *)(host_addr + GUEST_BASE) = tswap16(t0);
#endif
            break;
        case INDEX_op_qemu_st32:
            t0 = tci_read_r32(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            t2 = tci_read_i(&tb_ptr);
            helper_stl_mmu(env, taddr, t0, t2);
#else
            host_addr = (tcg_target_ulong)taddr;
            *(uint32_t *)(host_addr + GUEST_BASE) = tswap32(t0);
#endif
            break;
        case INDEX_op_qemu_st64:
            tmp64 = tci_read_r64(&tb_ptr);
            taddr = tci_read_ulong(&tb_ptr);
#ifdef CONFIG_SOFTMMU
            t2 = tci_read_i(&tb_ptr);
            helper_stq_mmu(env, taddr, tmp64, t2);
#else
            host_addr = (tcg_target_ulong)taddr;
            *(uint64_t *)(host_addr + GUEST_BASE) = tswap64(tmp64);
#endif
            break;
        default:
            TODO();
            break;
        }
        assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return next_tb;
}
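Among the handlers above, the deposit ops are the least obvious: they build a field mask from a start position (tmp16) and width (tmp8), then splice one register's bits into another's. A standalone sketch of the same 32-bit computation, assuming 0 < len < 32 so the shift stays defined:

#include <stdint.h>

/* Insert `val` into `dst` at bit position `pos`, width `len`
 * (mirrors the INDEX_op_deposit_i32 handler above). */
static uint32_t deposit32(uint32_t dst, unsigned int pos, unsigned int len,
			  uint32_t val)
{
	uint32_t mask = ((1u << len) - 1) << pos;

	return (dst & ~mask) | ((val << pos) & mask);
}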
Exemplo n.º 26
0
static void sha1_process(sha1_context_t *ctx, const uint8_t *in)
{
	const uint32_t k[] = {
		0x5A827999,
		0x6ED9EBA1,
		0x8F1BBCDC,
		0xCA62C1D6,
	};
	uint32_t w[80], temp, a, b, c, d, e;
	int i, j;

	for (i = 0, j = 0; i < 16; i++, j += 4)
		w[i] = (in[j] << 24) | (in[j+1] << 16) |
			(in[j+2] << 8) | in[j+3];
	for (; i < 80; i++)
		w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);

	a = ctx->h[0];
	b = ctx->h[1];
	c = ctx->h[2];
	d = ctx->h[3];
	e = ctx->h[4];

	for (i = 0; i < 20; i++) {
		temp = rol32(a, 5) + f0(b, c, d) + e + w[i] + k[0];

		e = d;
		d = c;
		c = rol32(b, 30);
		b = a;
		a = temp;
	}
	for (; i < 40; i++) {
		temp = rol32(a, 5) + f1(b, c, d) + e + w[i] + k[1];

		e = d;
		d = c;
		c = rol32(b, 30);
		b = a;
		a = temp;
	}
	for (; i < 60; i++) {
		temp = rol32(a, 5) + f2(b, c, d) + e + w[i] + k[2];

		e = d;
		d = c;
		c = rol32(b, 30);
		b = a;
		a = temp;
	}
	for (; i < 80; i++) {
		temp = rol32(a, 5) + f1(b, c, d) + e + w[i] + k[3];

		e = d;
		d = c;
		c = rol32(b, 30);
		b = a;
		a = temp;
	}

	ctx->h[0] += a;
	ctx->h[1] += b;
	ctx->h[2] += c;
	ctx->h[3] += d;
	ctx->h[4] += e;
}
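sha1_process() only runs the compression; a complete hash also needs the standard initial state plus the usual length and padding bookkeeping. A hedged init sketch, assuming sha1_context_t exposes the uint32_t h[5] state used above, with the FIPS 180-4 initial values:

static void sha1_init(sha1_context_t *ctx)
{
	/* FIPS 180-4 initial hash values for SHA-1. */
	ctx->h[0] = 0x67452301;
	ctx->h[1] = 0xEFCDAB89;
	ctx->h[2] = 0x98BADCFE;
	ctx->h[3] = 0x10325476;
	ctx->h[4] = 0xC3D2E1F0;
}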