Example #1
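From an Apple-local fork of GDB: returns the start address of the function containing `pc`. It first tries full debug info (`block_for_pc`, then `block_function`), using `BLOCK_LOWEST_PC` so that functions with discontiguous address ranges report their lowest address; failing that, it falls back to the minimal symbol table, and returns 0 when nothing matches.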
CORE_ADDR
get_pc_function_start (CORE_ADDR pc)
{
  struct block *bl;
  struct minimal_symbol *msymbol;

  bl = block_for_pc (pc);
  if (bl)
    {
      struct symbol *symbol = block_function (bl);

      if (symbol)
	{
	  bl = SYMBOL_BLOCK_VALUE (symbol);
	  /* APPLE LOCAL begin address ranges  */
	  return BLOCK_LOWEST_PC (bl);
	  /* APPLE LOCAL end address ranges  */
	}
    }

  msymbol = lookup_minimal_symbol_by_pc (pc);
  if (msymbol)
    {
      CORE_ADDR fstart = SYMBOL_VALUE_ADDRESS (msymbol);

      if (find_pc_section (fstart))
	return fstart;
    }

  return 0;
}
Example #2
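The stock variant of the same function: identical lookup logic, but it takes `BLOCK_START` of the function's block rather than the address-ranges-aware `BLOCK_LOWEST_PC`.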
CORE_ADDR
get_pc_function_start (CORE_ADDR pc)
{
    struct block *bl;
    struct minimal_symbol *msymbol;

    bl = block_for_pc (pc);
    if (bl)
    {
        struct symbol *symbol = block_function (bl);

        if (symbol)
        {
            bl = SYMBOL_BLOCK_VALUE (symbol);
            return BLOCK_START (bl);
        }
    }

    msymbol = lookup_minimal_symbol_by_pc (pc);
    if (msymbol)
    {
        CORE_ADDR fstart = SYMBOL_VALUE_ADDRESS (msymbol);

        if (find_pc_section (fstart))
            return fstart;
    }

    return 0;
}
Example #3
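From a CTR-mode crypto I/O layer: a helper that fetches one AES block even when it straddles sector boundaries. It reads enough whole sectors from the lower device to cover the block, applies the caller-supplied `block_function` (the cipher transform) to the block in place, and copies the result into `buffer`.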
// Helper function: retrieves an AES block, regardless of whether it is aligned with the underlying sector geometry.
static inline int get_misaligned_block(ctr_crypto_interface *io, size_t sector, size_t sector_size, size_t block_size, uint8_t *buffer, void (*block_function)(void *io, void *buffer, uint64_t block, size_t block_count))
{
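	// Work out how many whole sectors before and after `sector` must be
	// read so that the AES block overlapping it is fully covered.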
	size_t sectors_to_copy_prior = get_chunks_to_complete_relative_chunk_backwards(sector, sector_size, block_size);
	size_t sectors_to_copy_after = get_chunks_to_complete_relative_chunk(sector, sector_size, block_size);
	if (!sectors_to_copy_after)
		sectors_to_copy_after = block_size;

	size_t sector_count = sectors_to_copy_prior + sectors_to_copy_after;
	size_t buf_size = sector_count * sector_size;
	uint8_t buf[buf_size];
	int res = ctr_io_read_sector(io->lower_io, buf, buf_size, sector - sectors_to_copy_prior, sector_count);

	if (res)
		return res;

	size_t current_block = get_prev_relative_chunk(sector, sector_size, block_size);
	uint64_t block_pos = get_chunk_position(current_block, block_size);
	uint64_t block_start_offset = block_pos - get_chunk_position(sector - sectors_to_copy_prior, sector_size);
	uint8_t *pos = buf + block_start_offset;

	block_function(io, pos, current_block, 1);
	memcpy(buffer, pos, block_size);

	return res;
}
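The chunk-arithmetic helpers called above (`get_chunk_position`, `get_prev_relative_chunk`, `get_chunks_to_complete_relative_chunk` and its `_backwards` twin) are not part of this listing. Below is a minimal sketch of what they plausibly compute, treating a "chunk" as index times size in bytes; these bodies are reconstructions for illustration, not the project's actual code.

#include <stddef.h>
#include <stdint.h>

// Byte offset at which chunk number `chunk` of size `chunk_size` starts.
static inline uint64_t get_chunk_position(size_t chunk, size_t chunk_size)
{
	return (uint64_t)chunk * chunk_size;
}

// Index of the last `other_size`-sized chunk starting at or before the
// start of the given chunk.
static inline size_t get_prev_relative_chunk(size_t chunk, size_t chunk_size,
	size_t other_size)
{
	return (size_t)(get_chunk_position(chunk, chunk_size) / other_size);
}

// Number of `chunk_size` chunks lying before `chunk` inside the same
// `other_size` chunk, i.e. how far back to go to reach its start.
static inline size_t get_chunks_to_complete_relative_chunk_backwards(
	size_t chunk, size_t chunk_size, size_t other_size)
{
	return (size_t)((get_chunk_position(chunk, chunk_size) % other_size)
		/ chunk_size);
}

// Number of `chunk_size` chunks from `chunk` onward needed to reach the
// next `other_size` boundary; 0 when already on a boundary.
static inline size_t get_chunks_to_complete_relative_chunk(
	size_t chunk, size_t chunk_size, size_t other_size)
{
	uint64_t rem = get_chunk_position(chunk, chunk_size) % other_size;
	return rem ? (size_t)((other_size - rem) / chunk_size) : 0;
}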
Example #4
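GDB again: resolves the function symbol for a stack frame by looking up the frame's containing block; returns 0 when the block is unknown.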
struct symbol *
get_frame_function (struct frame_info *frame)
{
  struct block *bl = get_frame_block (frame, 0);
  if (bl == 0)
    return 0;
  return block_function (bl);
}
Example #5
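A section-aware lookup: finds the function symbol containing `pc` within a specific BFD section, with the same null-propagation as the frame variant above.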
struct symbol *
find_pc_sect_function (CORE_ADDR pc, struct bfd_section *section)
{
    struct block *b = block_for_pc_sect (pc, section);
    if (b == 0)
        return 0;
    return block_function (b);
}
Example #6
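The Apple-local version of `find_pc_sect_function`: it adds a one-entry cache keyed on the last looked-up PC and section, and, when the block has no regular function symbol, consults `block_inlined_function` so that inlined functions are resolved too.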
struct symbol *
find_pc_sect_function (CORE_ADDR pc, struct bfd_section *section)
{
  /* APPLE LOCAL begin cache lookup values for improved performance  */
  struct block *b;
  /* APPLE LOCAL inlined function symbols & blocks  */
  struct symbol *func_sym;

  if (pc == last_function_lookup_pc
      && pc == last_mapped_section_lookup_pc
      && section == cached_mapped_section
      && cached_pc_function)
    return cached_pc_function;

  last_function_lookup_pc = pc;

  b = block_for_pc_sect (pc, section);
  if (b == 0)
    {
      cached_pc_function = NULL;
      return 0;
    }

  /* APPLE LOCAL begin inlined function symbols & blocks  */
  func_sym = NULL;
  if (BLOCK_FUNCTION (b) == 0)
    {
      /* APPLE LOCAL radar 6381384, add section to symtab lookups.  */
      func_sym = block_inlined_function (b, section);
    }

  if (!func_sym)
    func_sym = block_function (b);

  cached_pc_function = func_sym;
  return func_sym;
  /* APPLE LOCAL end inlined function symbols & blocks  */
  /* APPLE LOCAL end cache lookup values for improved performance  */
}
Example #7
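An older revision of `get_pc_function_start`: the same two-step lookup written with `register` declarations and chained assignments, and without the `find_pc_section` sanity check on the minimal-symbol fallback.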
CORE_ADDR
get_pc_function_start (CORE_ADDR pc)
{
  register struct block *bl;
  register struct symbol *symbol;
  register struct minimal_symbol *msymbol;
  CORE_ADDR fstart;

  if ((bl = block_for_pc (pc)) != NULL &&
      (symbol = block_function (bl)) != NULL)
    {
      bl = SYMBOL_BLOCK_VALUE (symbol);
      fstart = BLOCK_START (bl);
    }
  else if ((msymbol = lookup_minimal_symbol_by_pc (pc)) != NULL)
    {
      fstart = SYMBOL_VALUE_ADDRESS (msymbol);
    }
  else
    {
      fstart = 0;
    }
  return (fstart);
}
Example #8
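The crypto interface's sector-read entry point. It reads the raw sectors through the lower I/O layer, then applies the cipher in three passes: a possibly misaligned leading AES block, the run of fully aligned blocks in the middle, and a possibly misaligned trailing block, using `get_misaligned_block` from Example #3 for the ragged edges.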
int ctr_crypto_interface_read_sector(void *io, void *buffer, size_t buffer_size, size_t sector, size_t count)
{
	int res = 0;
	ctr_crypto_interface *crypto_io = io;
	const size_t sector_size = ctr_io_sector_size(crypto_io->lower_io);
	const size_t block_size = AES_BLOCK_SIZE;

	// Clamp the requested count to the number of whole sectors that fit
	// in the caller's buffer.
	size_t result_count = count < buffer_size/sector_size ? count : buffer_size/sector_size;

	void (*block_function)(void *io, void *buffer, uint64_t block, size_t block_count);
	block_function = output;
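	// NOTE: `output` is defined elsewhere in the original file; presumably
	// the AES transform that processes a run of blocks in place.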

	if (result_count)
	{
		size_t result_size = result_count * sector_size;
		uint8_t *current_processed = buffer;
		size_t current_block = get_prev_relative_chunk(sector, sector_size, block_size);

		res = ctr_io_read_sector(crypto_io->lower_io, buffer, buffer_size, sector, result_count);
		if (res)
			return res;

		// Part 1: deal with the misaligned first block
		size_t sectors_to_copy_prior = get_chunks_to_complete_relative_chunk_backwards(sector, sector_size, block_size);
		if (sectors_to_copy_prior)
		{
			uint8_t buf[block_size];
			res = get_misaligned_block(crypto_io, sector, sector_size, block_size, buf, block_function);
			if (res)
				return res;

			// We now have the full block; figure out which part of it to copy.
			uint64_t sector_pos = get_chunk_position(sector, sector_size);
			uint64_t block_pos = get_chunk_position(current_block, block_size);

			size_t amount_to_copy = block_size - (sector_pos - block_pos);
			amount_to_copy = amount_to_copy < buffer_size ? amount_to_copy : buffer_size;
			memcpy(buffer, buf + (sector_pos - block_pos), amount_to_copy);
			current_processed += amount_to_copy;

			current_block++;
		}

		// Part 2: deal with all fully aligned intermediate blocks
		size_t bytes_left = result_size - (size_t)(current_processed - (uint8_t*)buffer);
		size_t blocks = FLOOR(bytes_left, block_size);
		if (blocks)
		{
			block_function(io, current_processed, current_block, blocks);
			current_block += blocks;
			current_processed += blocks * block_size;
			bytes_left -= blocks * block_size;
		}

		// Part 3: deal with the final, possibly misaligned block
		// FIXME: what if the block continues past the end of the disk? Pad with zeros?
		if (bytes_left)
		{
			uint8_t buf[block_size];
			size_t block_sector = get_prev_relative_chunk(current_block, block_size, sector_size);
			res = get_misaligned_block(crypto_io, block_sector, sector_size, block_size, buf, block_function);
			if (res)
				return res;

			// We now have the full block; figure out which part of it to copy.
			uint64_t sector_pos = get_chunk_position(block_sector, sector_size);
			uint64_t block_pos = get_chunk_position(current_block, block_size);

			size_t amount_to_copy = block_size - (sector_pos - block_pos);
			amount_to_copy = amount_to_copy < bytes_left ? amount_to_copy : bytes_left;
			memcpy(current_processed, buf + (sector_pos - block_pos), amount_to_copy);
		}

	}
	return res;
}
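A minimal, hypothetical caller, assuming `crypto_io` has already been initialized over a lower I/O layer with 512-byte sectors (the setup code is not part of this listing):

#include <stdint.h>

// Hypothetical usage: read and decrypt 4 sectors starting at sector 100.
// `crypto_io` is assumed to point at a fully initialized ctr_crypto_interface.
static int read_some_sectors(ctr_crypto_interface *crypto_io)
{
	uint8_t data[4 * 512];
	return ctr_crypto_interface_read_sector(crypto_io, data, sizeof(data),
		100, 4);
}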