/*******************************************************************************
 * Generic function to load an image into trusted RAM, given its name, the
 * extents of free memory, and whether it should be loaded at the top or the
 * bottom of free memory. On a successful load it updates the memory layout
 * and fills in the image information and entry point information in the
 * parameters passed in.
 ******************************************************************************/
int load_image(meminfo_t *mem_layout,
			 const char *image_name,
			 unsigned int load_type,
			 unsigned long fixed_addr,
			 image_info_t *image_data,
			 entry_point_info_t *entry_point_info)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	unsigned long temp_image_base = 0;
	unsigned long image_base = 0;
	long offset = 0;
	size_t image_size = 0;
	size_t bytes_read = 0;
	int io_result = IO_FAIL;

	assert(mem_layout != NULL);
	assert(image_name != NULL);
	assert(image_data != NULL);
	assert(image_data->h.version >= VERSION_1);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_name, &dev_handle, &image_spec);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to obtain reference to image '%s' (%i)\n",
			image_name, io_result);
		return io_result;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != IO_SUCCESS) {
		WARN("Failed to access image '%s' (%i)\n",
			image_name, io_result);
		return io_result;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != IO_SUCCESS) || (image_size == 0)) {
		WARN("Failed to determine the size of the image '%s' file (%i)\n",
			image_name, io_result);
		goto exit;
	}

	/* See if we have enough space */
	if (image_size > mem_layout->free_size) {
		WARN("Cannot load '%s' file: Not enough space.\n",
			image_name);
		dump_load_info(0, image_size, mem_layout);
		goto exit;
	}

	switch (load_type) {

	case TOP_LOAD:
		/* Load the image at the top of free memory */
		temp_image_base = mem_layout->free_base + mem_layout->free_size;
		temp_image_base -= image_size;

		/* Page align the base address and check whether the image still fits */
		image_base = page_align(temp_image_base, DOWN);
		assert(image_base <= temp_image_base);

		if (image_base < mem_layout->free_base) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			io_result = -ENOMEM;
			goto exit;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = temp_image_base - image_base;

		break;

	case BOT_LOAD:
		/* Load the image at the bottom of free memory */
		temp_image_base = mem_layout->free_base;

		/* Page align the base address and check whether the image still fits */
		image_base = page_align(temp_image_base, UP);
		assert(image_base >= temp_image_base);

		if (image_base + image_size >
		    mem_layout->free_base + mem_layout->free_size) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			io_result = -ENOMEM;
			goto exit;
		}

		/* Calculate the amount of extra memory used due to alignment */
		offset = image_base - temp_image_base;

		break;

	default:
		assert(0);
	}

	/*
	 * Some images must be loaded at a fixed address, not a dynamic one.
	 *
	 * This has been implemented as a hack on top of the existing dynamic
	 * loading mechanism, for the time being. If the 'fixed_addr' function
	 * argument is non-zero, it forces the load address. So we still have
	 * the principle of top/bottom loading, but the code determining the
	 * load address is bypassed and the load address is forced to the
	 * fixed one.
	 *
	 * This can result in quite a lot of wasted space because we still use
	 * a single meminfo structure to represent the extents of free memory,
	 * where we should use some sort of linked list.
	 *
	 * E.g. we want to load BL2 at address 0x04020000, the resulting memory
	 *      layout should look as follows:
	 * ------------ 0x04040000
	 * |          |  <- Free space (1)
	 * |----------|
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 *
	 * But in the current hacky implementation, we'll need to specify
	 * whether BL2 is loaded at the top or bottom of the free memory.
	 * E.g. if BL2 is considered as top-loaded, the meminfo structure
	 * will give the following view of the memory, hiding the chunk of
	 * free memory above BL2:
	 * ------------ 0x04040000
	 * |          |
	 * |          |
	 * |   BL2    |
	 * |----------| 0x04020000
	 * |          |  <- Free space (2)
	 * |----------|
	 * |   BL1    |
	 * ------------ 0x04000000
	 */
	if (fixed_addr != 0) {
		/* Load the image at the given address. */
		image_base = fixed_addr;

		/* Check whether the image fits. */
		if ((image_base < mem_layout->free_base) ||
		    (image_base + image_size >
		       mem_layout->free_base + mem_layout->free_size)) {
			WARN("Cannot load '%s' file: Not enough space.\n",
				image_name);
			dump_load_info(image_base, image_size, mem_layout);
			io_result = -ENOMEM;
			goto exit;
		}

		/* Check whether the fixed load address is page-aligned. */
		if (!is_page_aligned(image_base)) {
			WARN("Cannot load '%s' file at unaligned address 0x%lx\n",
				image_name, fixed_addr);
			io_result = -ENOMEM;
			goto exit;
		}

		/*
		 * Calculate the amount of extra memory used due to fixed
		 * loading.
		 */
		if (load_type == TOP_LOAD) {
			unsigned long max_addr, space_used;
			/*
			 * ------------ max_addr
			 * | /wasted/ |                 | offset
			 * |..........|..............................
			 * |  image   |                 | image_size
			 * |----------| fixed_addr
			 * |          |
			 * |          |
			 * ------------ total_base
			 */
			max_addr = mem_layout->total_base + mem_layout->total_size;
			/*
			 * Compute the amount of memory used by the image.
			 * Corresponds to all space above the image load
			 * address.
			 */
			space_used = max_addr - fixed_addr;
			/*
			 * Calculate the amount of wasted memory within the
			 * amount of memory used by the image.
			 */
			offset = space_used - image_size;
		} else /* BOT_LOAD */
			/*
			 * ------------
			 * |          |
			 * |          |
			 * |----------|
			 * |  image   |
			 * |..........| fixed_addr
			 * | /wasted/ |                 | offset
			 * ------------ total_base
			 */
			offset = fixed_addr - mem_layout->total_base;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != IO_SUCCESS) || (bytes_read < image_size)) {
		WARN("Failed to load '%s' file (%i)\n", image_name, io_result);
		goto exit;
	}

	image_data->image_base = image_base;
	image_data->image_size = image_size;

	entry_point_info->pc = image_base;

	/*
	 * The file has been successfully loaded. Update the free memory
	 * data structure & flush the contents of the TZRAM so that the
	 * next EL can see it.
	 */
	flush_dcache_range(image_base, image_size);

	mem_layout->free_size -= image_size + offset;

	/* Update the base of free memory since it has moved up */
	if (load_type == BOT_LOAD)
		mem_layout->free_base += offset + image_size;

exit:
	io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return io_result;
}
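
The page_align() and is_page_aligned() helpers called above are not shown on this page. A minimal sketch of the semantics these boot-loader examples assume (rounding to a 4 KB page boundary, UP or DOWN); the actual Trusted Firmware definitions may differ:

/* Hypothetical reconstruction of the alignment helpers used above. */
#define PAGE_SIZE	0x1000UL
#define UP		1
#define DOWN		0

static unsigned long page_align(unsigned long value, unsigned dir)
{
	/* Round to the enclosing page boundary, if not already aligned */
	if ((value & (PAGE_SIZE - 1)) != 0) {
		value &= ~(PAGE_SIZE - 1);
		if (dir == UP)
			value += PAGE_SIZE;
	}
	return value;
}

static int is_page_aligned(unsigned long addr)
{
	return (addr & (PAGE_SIZE - 1)) == 0;
}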
Example #2
void
btr_pcur_store_position(
/*====================*/
	btr_pcur_t*	cursor, /* in: persistent cursor */
	mtr_t*		mtr)	/* in: mtr */
{
	page_cur_t*	page_cursor;
	rec_t*		rec;
	dict_index_t*	index;
	page_t*		page;
	ulint		offs;

	ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED);
	ut_ad(cursor->latch_mode != BTR_NO_LATCHES);

	index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));

	page_cursor = btr_pcur_get_page_cur(cursor);

	rec = page_cur_get_rec(page_cursor);
	page = page_align(rec);
	offs = page_offset(rec);

	ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
				MTR_MEMO_PAGE_S_FIX)
	      || mtr_memo_contains(mtr, buf_block_align(page),
				   MTR_MEMO_PAGE_X_FIX));
	ut_a(cursor->latch_mode != BTR_NO_LATCHES);

	if (UNIV_UNLIKELY(page_get_n_recs(page) == 0)) {
		/* It must be an empty index tree; NOTE that in this case
		we do not store the modify_clock, but always do a search
		if we restore the cursor position */

		ut_a(btr_page_get_next(page, mtr) == FIL_NULL);
		ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);

		cursor->old_stored = BTR_PCUR_OLD_STORED;

		if (page_rec_is_supremum_low(offs)) {

			cursor->rel_pos = BTR_PCUR_AFTER_LAST_IN_TREE;
		} else {
			cursor->rel_pos = BTR_PCUR_BEFORE_FIRST_IN_TREE;
		}

		return;
	}

	if (page_rec_is_supremum_low(offs)) {

		rec = page_rec_get_prev(rec);

		cursor->rel_pos = BTR_PCUR_AFTER;

	} else if (page_rec_is_infimum_low(offs)) {

		rec = page_rec_get_next(rec);

		cursor->rel_pos = BTR_PCUR_BEFORE;
	} else {
		cursor->rel_pos = BTR_PCUR_ON;
	}

	cursor->old_stored = BTR_PCUR_OLD_STORED;
	cursor->old_rec = dict_index_copy_rec_order_prefix(
		index, rec, &cursor->old_n_fields,
		&cursor->old_rec_buf, &cursor->buf_size);

	cursor->block_when_stored = buf_block_align(page);
	cursor->modify_clock = buf_block_get_modify_clock(
		cursor->block_when_stored);
}
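
A stored position is only useful together with its restore counterpart. A sketch of the intended usage, assuming the standard InnoDB persistent-cursor API from the same source tree (btr_pcur_restore_position() returns TRUE when the exact record is found again):

/* Sketch: keep a cursor position across a latch release (assumed API). */
mtr_t	mtr;

mtr_start(&mtr);
/* ... position 'cursor' on a record ... */
btr_pcur_store_position(cursor, &mtr);
mtr_commit(&mtr);	/* releases the page latches */

/* ... work that must not hold page latches ... */

mtr_start(&mtr);
if (!btr_pcur_restore_position(BTR_SEARCH_LEAF, cursor, &mtr)) {
	/* The record was purged or moved; cursor->rel_pos tells
	where the cursor now is relative to the stored record. */
}
mtr_commit(&mtr);

Example #3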
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	uint64_t rw_start = BL31_RW_START;
	uint64_t rw_size = BL31_RW_END - BL31_RW_START;
	uint64_t rodata_start = BL31_RODATA_BASE;
	uint64_t rodata_size = BL31_RODATA_END - BL31_RODATA_BASE;
	uint64_t code_base = TEXT_START;
	uint64_t code_size = TEXT_END - TEXT_START;
	const mmap_region_t *plat_mmio_map = NULL;
#if USE_COHERENT_MEM
	uint32_t coh_start, coh_size;
#endif
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();

	/*
	 * Add timestamp for arch setup entry.
	 */
	boot_profiler_add_record("[TF] arch setup entry");

	/* add memory regions */
	mmap_add_region(rw_start, rw_start,
			rw_size,
			MT_MEMORY | MT_RW | MT_SECURE);
	mmap_add_region(rodata_start, rodata_start,
			rodata_size,
			MT_RO_DATA | MT_SECURE);
	mmap_add_region(code_base, code_base,
			code_size,
			MT_CODE | MT_SECURE);

	/* map TZDRAM used by BL31 as coherent memory */
	if (TEGRA_TZRAM_BASE == tegra_bl31_phys_base) {
		mmap_add_region(params_from_bl2->tzdram_base,
				params_from_bl2->tzdram_base,
				BL31_SIZE,
				MT_DEVICE | MT_RW | MT_SECURE);
	}

#if USE_COHERENT_MEM
	/* tegra_bl31_phys_base is assumed to be the BL31 load base here */
	coh_start = tegra_bl31_phys_base + (BL_COHERENT_RAM_BASE - BL31_RO_BASE);
	coh_size = BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE;

	mmap_add_region(coh_start, coh_start,
			coh_size,
			(uint8_t)MT_DEVICE | (uint8_t)MT_RW | (uint8_t)MT_SECURE);
#endif

	/* map on-chip free running uS timer */
	mmap_add_region(page_align(TEGRA_TMRUS_BASE, 0 /* DOWN */),
			page_align(TEGRA_TMRUS_BASE, 0 /* DOWN */),
			TEGRA_TMRUS_SIZE,
			(uint8_t)MT_DEVICE | (uint8_t)MT_RO | (uint8_t)MT_SECURE);

	/* add MMIO space */
	plat_mmio_map = plat_get_mmio_map();
	if (plat_mmio_map != NULL) {
		mmap_add(plat_mmio_map);
	} else {
		WARN("MMIO map not available\n");
	}

	/* set up translation tables */
	init_xlat_tables();

	/* enable the MMU */
	enable_mmu_el3(0);

	/*
	 * Add timestamp for arch setup exit.
	 */
	boot_profiler_add_record("[TF] arch setup exit");

	INFO("BL3-1: Tegra: MMU enabled\n");
}
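
The mmap_add_region() calls above follow the version-1 translation-table API, which takes (base_pa, base_va, size, attributes) and is used here only for identity mappings. A hedged sketch of adding one more device region the same way (UART_BASE and UART_SIZE are placeholder names, not Tegra symbols):

/* Identity-map one device region, aligning the base down to a page. */
uint64_t uart_base = page_align(UART_BASE, 0 /* DOWN */);
mmap_add_region(uart_base, uart_base,	/* PA == VA */
		UART_SIZE,
		MT_DEVICE | MT_RW | MT_SECURE);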
Example #4
static int32_t tbase_init_entry(void)
{
  DBG_PRINTF("tbase_init\n\r");

  // Save el1 registers in case non-secure world has already been set up.
  cm_el1_sysregs_context_save(NON_SECURE);

  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  
  
  // Note: mapping is 1:1, so physical and virtual addresses are here the same.
  cpu_context_t *ns_entry_context = (cpu_context_t *) cm_get_context(mpidr, NON_SECURE);  
  
  // ************************************************************************************
  // Configure parameter passing to tbase
  
  // Calculate page start addresses for register areas.
  // Note: ns_entry_context itself (not its address) is the area base,
  // consistent with the array indexing below.
  registerFileStart[REGISTER_FILE_NWD] = page_align((uint64_t)ns_entry_context, DOWN);
  registerFileStart[REGISTER_FILE_MONITOR] = page_align((uint64_t)&msm_area, DOWN);

  // Calculate page end addresses for register areas.
  registerFileEnd[REGISTER_FILE_NWD] = (uint64_t)(&ns_entry_context[TBASE_CORE_COUNT]);
  registerFileEnd[REGISTER_FILE_MONITOR] = ((uint64_t)&msm_area) + sizeof(msm_area);

  int32_t totalPages = 0;
  for (int area=0; area<REGISTER_FILE_COUNT; area++) {
    int32_t pages = page_align(registerFileEnd[area] - registerFileStart[area], UP) / PAGE_SIZE;
    assert( pages +totalPages <= TBASE_INTERFACE_PAGES );
    tbase_init_register_file(area, totalPages, pages);
    totalPages += pages;
  }

  // ************************************************************************************
  // Create boot structure
  tbaseBootCfg.magic       = TBASE_BOOTCFG_MAGIC;
  tbaseBootCfg.length      = sizeof(bootCfg_t);
  tbaseBootCfg.version     = TBASE_MONITOR_INTERFACE_VERSION;
  
  tbaseBootCfg.dRamBase    = TBASE_NWD_DRAM_BASE;
  tbaseBootCfg.dRamSize    = TBASE_NWD_DRAM_SIZE;
  tbaseBootCfg.secDRamBase = TBASE_SWD_DRAM_BASE;
  tbaseBootCfg.secDRamSize = TBASE_SWD_DRAM_SIZE;
  tbaseBootCfg.secIRamBase = TBASE_SWD_IMEM_BASE;
  tbaseBootCfg.secIRamSize = TBASE_SWD_IMEM_SIZE;
  
  tbaseBootCfg.conf_mair_el3 = read_mair_el3();
  tbaseBootCfg.MSMPteCount = totalPages;
  tbaseBootCfg.MSMBase = (uint64_t)registerFileL2;
  
  tbaseBootCfg.gic_distributor_base = TBASE_GIC_DIST_BASE;
  tbaseBootCfg.gic_cpuinterface_base = TBASE_GIC_CPU_BASE;
  tbaseBootCfg.gic_version = TBASE_GIC_VERSION;
  
  tbaseBootCfg.total_number_spi = TBASE_SPI_COUNT;
  tbaseBootCfg.ssiq_number = TBASE_SSIQ_NRO;
  
  tbaseBootCfg.flags       = TBASE_MONITOR_FLAGS;


        DBG_PRINTF("*** tbase boot cfg ***\n\r");
        DBG_PRINTF("* magic                 : 0x%.X\n\r",tbaseBootCfg.magic);
        DBG_PRINTF("* length                : 0x%.X\n\r",tbaseBootCfg.length);
        DBG_PRINTF("* version               : 0x%.X\n\r",tbaseBootCfg.version);
        DBG_PRINTF("* dRamBase              : 0x%.X\n\r",tbaseBootCfg.dRamBase);
        DBG_PRINTF("* dRamSize              : 0x%.X\n\r",tbaseBootCfg.dRamSize);
        DBG_PRINTF("* secDRamBase           : 0x%.X\n\r",tbaseBootCfg.secDRamBase);
        DBG_PRINTF("* secDRamSize           : 0x%.X\n\r",tbaseBootCfg.secDRamSize);
        DBG_PRINTF("* secIRamBase           : 0x%.X\n\r",tbaseBootCfg.secIRamBase);
        DBG_PRINTF("* secIRamSize           : 0x%.X\n\r",tbaseBootCfg.secIRamSize);
        DBG_PRINTF("* conf_mair_el3         : 0x%.X\n\r",tbaseBootCfg.conf_mair_el3);
        DBG_PRINTF("* MSMPteCount           : 0x%.X\n\r",tbaseBootCfg.MSMPteCount);
        DBG_PRINTF("* MSMBase               : 0x%.X\n\r",tbaseBootCfg.MSMBase);
        DBG_PRINTF("* gic_distributor_base  : 0x%.X\n\r",tbaseBootCfg.gic_distributor_base);
        DBG_PRINTF("* gic_cpuinterface_base : 0x%.X\n\r",tbaseBootCfg.gic_cpuinterface_base);
        DBG_PRINTF("* gic_version           : 0x%.X\n\r",tbaseBootCfg.gic_version);
        DBG_PRINTF("* total_number_spi      : 0x%.X\n\r",tbaseBootCfg.total_number_spi);
        DBG_PRINTF("* ssiq_number           : 0x%.X\n\r",tbaseBootCfg.ssiq_number);
        DBG_PRINTF("* flags                 : 0x%.X\n\r",tbaseBootCfg.flags);

  // ************************************************************************************
  // tbaseBootCfg and the L2 entries may be accessed uncached, so they must be flushed.
  flush_dcache_range((unsigned long)&tbaseBootCfg, sizeof(bootCfg_t));
  flush_dcache_range((unsigned long)&registerFileL2, sizeof(registerFileL2));
  
  // ************************************************************************************
  // Set registers for tbase initialization entry
  cpu_context_t *s_entry_context = &tbase_ctx->cpu_ctx;
  gp_regs_t *s_entry_gpregs = get_gpregs_ctx(s_entry_context);
  // Pass 0 in X0 and the boot configuration pointer in X1.
  write_ctx_reg(s_entry_gpregs, CTX_GPREG_X0, 0);
  write_ctx_reg(s_entry_gpregs, CTX_GPREG_X1, (int64_t)&tbaseBootCfg);

  
  // SPSR for SMC handling (FIQ mode)
  tbaseEntrySpsr = TBASE_ENTRY_SPSR;
  
  DBG_PRINTF("tbase init SPSR 0x%x\n\r", read_ctx_reg(get_el3state_ctx(&tbase_ctx->cpu_ctx), 
             CTX_SPSR_EL3) );
  DBG_PRINTF("tbase SMC SPSR %x\nr\r", tbaseEntrySpsr );

  // ************************************************************************************
  // Start tbase

  tbase_synchronous_sp_entry(tbase_ctx);
  tbase_ctx->state = TBASE_STATE_ON;
  
#if TBASE_PM_ENABLE
  // Register power management hooks with PSCI
  psci_register_spd_pm_hook(&tbase_pm);
#endif

  cm_el1_sysregs_context_restore(NON_SECURE);
  cm_set_next_eret_context(NON_SECURE);

  return 1;
}
Example #5
/***********************************************************************//**
Gets the next record to purge and updates the info in the purge system.
@return	copy of an undo log record or pointer to the dummy undo log record */
static
trx_undo_rec_t*
trx_purge_get_next_rec(
/*===================*/
	mem_heap_t*	heap)	/*!< in: memory heap where copied */
{
	trx_undo_rec_t*	rec;
	trx_undo_rec_t*	rec_copy;
	trx_undo_rec_t*	rec2;
	trx_undo_rec_t*	next_rec;
	page_t*		undo_page;
	page_t*		page;
	ulint		offset;
	ulint		page_no;
	ulint		space;
	ulint		zip_size;
	ulint		type;
	ulint		cmpl_info;
	mtr_t		mtr;

	ut_ad(purge_sys->next_stored);

	space = purge_sys->rseg->space;
	zip_size = purge_sys->rseg->zip_size;
	page_no = purge_sys->page_no;
	offset = purge_sys->offset;

	if (offset == 0) {
		/* It is the dummy undo log record, which means that there is
		no need to purge this undo log */

		trx_purge_rseg_get_next_history_log(purge_sys->rseg);

		/* Look for the next undo log and record to purge */

		trx_purge_choose_next_log();

		return(&trx_purge_dummy_rec);
	}

	mtr_start(&mtr);

	undo_page = trx_undo_page_get_s_latched(space, zip_size, page_no, &mtr);

	rec = undo_page + offset;

	rec2 = rec;

	for (;;) {
		/* Try first to find the next record which requires a purge
		operation from the same page of the same undo log */

		next_rec = trx_undo_page_get_next_rec(
			rec2, purge_sys->hdr_page_no, purge_sys->hdr_offset);

		if (next_rec == NULL) {
			rec2 = trx_undo_get_next_rec(
				rec2, purge_sys->hdr_page_no,
				purge_sys->hdr_offset, &mtr);
			break;
		}

		rec2 = next_rec;

		type = trx_undo_rec_get_type(rec2);

		if (type == TRX_UNDO_DEL_MARK_REC) {

			break;
		}

		cmpl_info = trx_undo_rec_get_cmpl_info(rec2);

		if (trx_undo_rec_get_extern_storage(rec2)) {
			break;
		}

		if ((type == TRX_UNDO_UPD_EXIST_REC)
		    && !(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
			break;
		}
	}

	if (rec2 == NULL) {
		mtr_commit(&mtr);

		trx_purge_rseg_get_next_history_log(purge_sys->rseg);

		/* Look for the next undo log and record to purge */

		trx_purge_choose_next_log();

		mtr_start(&mtr);

		undo_page = trx_undo_page_get_s_latched(space, zip_size,
							page_no, &mtr);

		rec = undo_page + offset;
	} else {
		page = page_align(rec2);

		purge_sys->purge_undo_no = trx_undo_rec_get_undo_no(rec2);
		purge_sys->page_no = page_get_page_no(page);
		purge_sys->offset = rec2 - page;

		if (undo_page != page) {
			/* We advance to a new page of the undo log: */
			purge_sys->n_pages_handled++;
		}
	}

	rec_copy = trx_undo_rec_copy(rec, heap);

	mtr_commit(&mtr);

	return(rec_copy);
}
Example #6
/* Tests covered here:
   - TEST_VM_PROT_* program slices actually succeed when a crash occurs
   - TEST_VM_MAP_ANON* program slices succeed when they can be compiled
*/
int
main(void)
{
	D(bug("%s\n", __func__));

	vm_init();

	signal(SIGSEGV, fault_handler);
#ifdef SIGBUS
	signal(SIGBUS,  fault_handler);
#endif
	
#define page_align(address) ((char *)((vm_uintptr_t)(address) & -page_size))
	vm_uintptr_t page_size = vm_get_page_size();
	
	const int area_size = 6 * page_size;
	volatile char * area = (volatile char *) vm_acquire(area_size);
	volatile char * fault_address = area + (page_size * 7) / 2;

#if defined(TEST_VM_MMAP_ANON) || defined(TEST_VM_MMAP_ANONYMOUS)
	if (area == VM_MAP_FAILED)
		return 1;

	if (vm_release((char *)area, area_size) < 0)
		return 1;
	
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ) || defined(TEST_VM_PROT_NONE_WRITE)
	if (area == VM_MAP_FAILED)
		return 0;
	
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_NOACCESS) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	if (area == VM_MAP_FAILED)
		return 1;
	
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 1;
	
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ | VM_PAGE_WRITE) < 0)
		return 1;
#endif

#if defined(TEST_VM_PROT_READ_WRITE)
	if (vm_protect(page_align(fault_address), page_size, VM_PAGE_READ) < 0)
		return 0;
#endif

#if defined(TEST_VM_PROT_NONE_READ)
	// this should cause a core dump
	char foo = *fault_address;
	return 0;
#endif

#if defined(TEST_VM_PROT_NONE_WRITE) || defined(TEST_VM_PROT_READ_WRITE)
	// this should cause a core dump
	*fault_address = 'z';
	return 0;
#endif

#if defined(TEST_VM_PROT_RDWR_WRITE)
	// this should not cause a core dump
	*fault_address = 'z';
	return 0;
#endif
}
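
The page_align macro defined in this test rounds down by masking with -page_size, which for a power-of-two page size is the same as clearing the low bits; rounding up only needs the offset added before masking. A self-contained illustration of the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Round down/up to a power-of-two boundary via masking. */
#define ALIGN_DOWN(x, a)	((uintptr_t)(x) & ~((uintptr_t)(a) - 1))
#define ALIGN_UP(x, a)		ALIGN_DOWN((uintptr_t)(x) + ((a) - 1), (a))

int main(void)
{
	assert(ALIGN_DOWN(0x12345, 0x1000) == 0x12000);
	assert(ALIGN_UP(0x12345, 0x1000) == 0x13000);
	assert(ALIGN_UP(0x12000, 0x1000) == 0x12000);	/* already aligned */
	return 0;
}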
Example #7
/********************************************************************//**
Removes a node. */
UNIV_INTERN
void
flst_remove(
/*========*/
	flst_base_node_t*	base,	/*!< in: pointer to base node of list */
	flst_node_t*		node2,	/*!< in: node to remove */
	mtr_t*			mtr)	/*!< in: mini-transaction handle */
{
	ulint		space;
	ulint		zip_size;
	flst_node_t*	node1;
	fil_addr_t	node1_addr;
	fil_addr_t	node2_addr;
	flst_node_t*	node3;
	fil_addr_t	node3_addr;
	ulint		len;

	ut_ad(mtr && node2 && base);
	ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX));
	ut_ad(mtr_memo_contains_page(mtr, node2, MTR_MEMO_PAGE_X_FIX));

	buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
	zip_size = fil_space_get_zip_size(space);

	node1_addr = flst_get_prev_addr(node2, mtr);
	node3_addr = flst_get_next_addr(node2, mtr);

	if (!fil_addr_is_null(node1_addr)) {

		/* Update next field of node1 */

		if (node1_addr.page == node2_addr.page) {

			node1 = page_align(node2) + node1_addr.boffset;
		} else {
			node1 = fut_get_ptr(space, zip_size,
					    node1_addr, RW_X_LATCH, mtr);
		}

		ut_ad(node1 != node2);

		flst_write_addr(node1 + FLST_NEXT, node3_addr, mtr);
	} else {
		/* node2 was first in list: update first field in base */
		flst_write_addr(base + FLST_FIRST, node3_addr, mtr);
	}

	if (!fil_addr_is_null(node3_addr)) {
		/* Update prev field of node3 */

		if (node3_addr.page == node2_addr.page) {

			node3 = page_align(node2) + node3_addr.boffset;
		} else {
			node3 = fut_get_ptr(space, zip_size,
					    node3_addr, RW_X_LATCH, mtr);
		}

		ut_ad(node2 != node3);

		flst_write_addr(node3 + FLST_PREV, node1_addr, mtr);
	} else {
		/* node2 was last in list: update last field in base */
		flst_write_addr(base + FLST_LAST, node1_addr, mtr);
	}

	/* Update len of base node */
	len = flst_get_len(base, mtr);
	ut_ad(len > 0);

	mlog_write_ulint(base + FLST_LEN, len - 1, MLOG_4BYTES, mtr);
}
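
For readers following the pointer arithmetic: node addresses here are (page number, byte offset) pairs, and page_align(node2) recovers the start of the page frame so that an on-page neighbour can be reached by adding its boffset. A sketch of the address pair as this example uses it (the real definition lives in InnoDB's fil0fil.h):

/* Sketch of the file address pair used above. */
typedef struct {
	ulint	page;		/* page number within the tablespace */
	ulint	boffset;	/* byte offset within the page */
} fil_addr_sketch_t;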
Example #8
// ************************************************************************************
// fastcall handler
static uint64_t tbase_fastcall_handler(uint32_t smc_fid,
        uint64_t x1,
        uint64_t x2,
        uint64_t x3,
        uint64_t x4,
        void *cookie,
        void *handle,
        uint64_t flags)
{
  uint64_t mpidr = read_mpidr();
  uint32_t linear_id = platform_get_core_pos(mpidr);
  tbase_context *tbase_ctx = &secure_context[linear_id];
  int caller_security_state = flags&1;
      
  if (caller_security_state==SECURE) {
    switch(maskSWdRegister(smc_fid)) {
      case TBASE_SMC_FASTCALL_RETURN: {
        // Return values from fastcall already in cpu_context!
        // TODO: Could we skip saving sysregs?
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_RETURN\n\r");
        tbase_synchronous_sp_exit(tbase_ctx, 0, 1);
      } 
      case TBASE_SMC_FASTCALL_CONFIG_OK: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_CONFIG_OK\n\r");
        configure_tbase(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      }
      case TBASE_SMC_FASTCALL_OUTPUT: {
        output(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      }
      case TBASE_SMC_FASTCALL_STATUS: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_STATUS\n\r");
        tbase_status(x1,x2);
        SMC_RET1(handle,smc_fid);
        break;
      }
      case TBASE_SMC_FASTCALL_INPUT: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_INPUT\n\r");
        smc_fid = plat_tbase_input(x1, &x2, &(tbase_ctx->tbase_input_fastcall));
        SMC_RET3(handle, smc_fid,
                 page_align(registerFileEnd[REGISTER_FILE_NWD]
                            - registerFileStart[REGISTER_FILE_NWD], UP)
                 + (uint64_t)&(tbase_ctx->tbase_input_fastcall)
                 - registerFileStart[REGISTER_FILE_MONITOR],
                 x2);
        break;
      }
      case TBASE_SMC_FASTCALL_DUMP: {
        DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_DUMP\n\r");
        tbase_triggerSgiDump();
        SMC_RET1(handle,smc_fid);
        break;
      }
      
      default: {
        // What now?
        DBG_PRINTF( "tbase_fastcall_handler SMC_UNK %x\n\r", smc_fid );
        SMC_RET1(handle, SMC_UNK);
        break;
      }
    }
  }
  else
  {
    if (smc_fid == TBASE_SMC_AEE_DUMP)         // N-world can request AEE Dump function
    {
      mt_atf_trigger_WDT_FIQ();
      // Once we return to the N-world caller, the FIQ will be triggered and
      // bring us to EL3 (ATF) on core #0 because of the HW wiring. The FIQ is
      // then handled the same way as a HW WDT FIQ.

      // Do we need to save/restore the N-world context before using it for the return?
      cm_el1_sysregs_context_restore(NON_SECURE);
      cm_set_next_eret_context(NON_SECURE);
      return 0;
    }
    if ((tbaseExecutionStatus&TBASE_STATUS_FASTCALL_OK_BIT)==0) {
      // TBASE must be initialized to be usable
      // TODO: What is the correct error code?
      DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" );
      SMC_RET1(handle, SMC_UNK);
      return 0;
    }
    if(tbase_ctx->state == TBASE_STATE_OFF) {
      DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" );
      SMC_RET1(handle, SMC_UNK);
      return 0;
    }

    DBG_PRINTF( "tbase_fastcall_handler NWd %x\n\r", smc_fid );
    // So far all fastcalls go to tbase
    // Save NWd context
    gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle);
    write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid ); // These are not saved yet
    write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2 );
    write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3 );
    cm_el1_sysregs_context_save(NON_SECURE);

    // Load SWd context
    tbase_setup_entry_nwd((cpu_context_t *)handle,ENTRY_OFFSET_FASTCALL);
#if DEBUG
    print_fastcall_params("entry", NON_SECURE);
#endif
    tbase_synchronous_sp_entry(tbase_ctx);
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);
    return 0; // Does not seem to matter what we return
  }
}
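
Note that SMC_RET1/SMC_RET3 above expand to stores into the saved GP-register context followed by a return, which is why the break statements after them are unreachable. A sketch of the Trusted Firmware convention (not the verbatim macro):

/* Write the SMC return value into the saved context and return the
   context pointer to the EL3 exception-return path. */
#define SMC_RET1(_h, _x0)	{					\
	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0));	\
	return (uint64_t)(_h);						\
}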
Example #9
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. If btr_search_enabled is set to FALSE, we will only allow
updating existing nodes, but no new node is allowed to be added.
@return	TRUE if succeed, FALSE if no more memory could be allocated */
UNIV_INTERN
ibool
ha_insert_for_fold_func(
/*====================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the same data, and no new
				node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	block,	/*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	rec_t*		data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ha_node_t*	prev_node;
	ulint		hash;

	ut_ad(data);
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ASSERT_HASH_MUTEX_OWN(table, fold);
	ut_ad(btr_search_enabled);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = cell->node;

	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
			if (table->adaptive) {
				buf_block_t* prev_block = prev_node->block;
				ut_a(prev_block->frame
				     == page_align(prev_node->data));
				ut_a(prev_block->n_pointers > 0);
				prev_block->n_pointers--;
				block->n_pointers++;
			}
# endif /* !UNIV_HOTBACKUP */

			prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, block, data);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
	if (table->adaptive) {
		block->n_pointers++;
	}
# endif /* !UNIV_HOTBACKUP */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	node->fold = fold;

	node->next = NULL;

	prev_node = cell->node;

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}
Example #10
int _snd_pcm_mmap(snd_pcm_t *pcm)
{
	unsigned int c;

	/* clear first */
	_snd_pcm_munmap(pcm);

	pcm->mmap_channels = calloc(pcm->channels, sizeof(*pcm->mmap_channels));
	if (!pcm->mmap_channels)
		return -ENOMEM;
	pcm->running_areas = calloc(pcm->channels, sizeof(*pcm->running_areas));
	if (!pcm->running_areas) {
		free(pcm->mmap_channels);
		pcm->mmap_channels = NULL;
		return -ENOMEM;
	}
	for (c = 0; c < pcm->channels; ++c) {
		snd_pcm_channel_info_t *i = &pcm->mmap_channels[c];
		i->info.channel = c;
		if (ioctl(pcm->fd, SNDRV_PCM_IOCTL_CHANNEL_INFO, &i->info) < 0)
			return -errno;
	}
	for (c = 0; c < pcm->channels; ++c) {
		snd_pcm_channel_info_t *i = &pcm->mmap_channels[c];
		snd_pcm_channel_area_t *a = &pcm->running_areas[c];
		char *ptr;
		size_t size;
		unsigned int c1;
		if (i->addr)
			goto copy;
		size = i->info.first + i->info.step * (pcm->buffer_size - 1) +
			pcm->sample_bits;
		for (c1 = c + 1; c1 < pcm->channels; ++c1) {
			snd_pcm_channel_info_t *i1 = &pcm->mmap_channels[c1];
			size_t s;
			if (i1->info.offset != i->info.offset)
				continue;
			s = i1->info.first + i1->info.step *
				(pcm->buffer_size - 1) + pcm->sample_bits;
			if (s > size)
				size = s;
		}
		size = (size + 7) / 8;
		size = page_align(size);
		ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			   MAP_FILE|MAP_SHARED, pcm->fd, i->info.offset);
		if (ptr == MAP_FAILED)
			return -errno;
		i->addr = ptr;
		for (c1 = c + 1; c1 < pcm->channels; ++c1) {
			snd_pcm_channel_info_t *i1 = &pcm->mmap_channels[c1];
			if (i1->info.offset != i->info.offset)
				continue;
			i1->addr = i->addr;
		}
	copy:
		a->addr = i->addr;
		a->first = i->info.first;
		a->step = i->info.step;
	}
	return 0;
}
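
The sizing loop above works in bits (first and step are bit offsets into the mmap region), so the (size + 7) / 8 converts to bytes before page_align() rounds the mapping length for mmap. A worked example with illustrative numbers for interleaved 16-bit stereo:

/* Hypothetical values for one channel of interleaved S16 stereo. */
size_t first = 16;		/* channel starts 16 bits into the frame */
size_t step = 32;		/* one stereo frame is 32 bits */
size_t buffer_size = 1024;	/* frames */
size_t sample_bits = 16;

size_t bits = first + step * (buffer_size - 1) + sample_bits;	/* 32768 */
size_t bytes = (bits + 7) / 8;					/* 4096 */
/* page_align(4096) stays 4096 with 4 KB pages, so one page is mapped. */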
Example #11
/********************************************************//**
Opens a buffer for mlog, writes the initial log record and,
if needed, the field lengths of an index.
@return	buffer, NULL if log mode MTR_LOG_NONE */
UNIV_INTERN
byte*
mlog_open_and_write_index(
/*======================*/
	mtr_t*		mtr,	/*!< in: mtr */
	const byte*	rec,	/*!< in: index record or page */
	dict_index_t*	index,	/*!< in: record descriptor */
	byte		type,	/*!< in: log item type */
	ulint		size)	/*!< in: requested buffer size in bytes
				(if 0, calls mlog_close() and returns NULL) */
{
	byte*		log_ptr;
	const byte*	log_start;
	const byte*	log_end;

	ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));

	if (!page_rec_is_comp(rec)) {
		log_start = log_ptr = mlog_open(mtr, 11 + size);
		if (!log_ptr) {
			return(NULL); /* logging is disabled */
		}
		log_ptr = mlog_write_initial_log_record_fast(rec, type,
							     log_ptr, mtr);
		log_end = log_ptr + 11 + size;
	} else {
		ulint	i;
		ulint	n	= dict_index_get_n_fields(index);
		ibool	is_gcs_cluster = dict_index_is_gcs_clust_after_alter_table(index);
		/* total size needed */
		/* The redo log may need two extra bytes, so 'total' must be
		sized according to the actual case! */
		ulint	total	= 11 + size + (n + 2 + (is_gcs_cluster ? 1 : 0)) * 2;
		ulint	alloc	= total;
        
		/* allocate at most DYN_ARRAY_DATA_SIZE at a time */
		if (alloc > DYN_ARRAY_DATA_SIZE) {
			alloc = DYN_ARRAY_DATA_SIZE;
		}
		log_start = log_ptr = mlog_open(mtr, alloc);
		if (!log_ptr) {
			return(NULL); /* logging is disabled */
		}
		log_end = log_ptr + alloc;
		log_ptr = mlog_write_initial_log_record_fast(rec, type,
							     log_ptr, mtr);

		/* Before the first ALTER TABLE, every GCS table can be treated
		as a compact table, even during redo, so the redo log stays
		compatible! */
		if (is_gcs_cluster) {
			/* rec may be the page header itself, e.g. for
			MLOG_COMP_LIST_END_COPY_CREATED log records:
			ut_ad(rec_is_gcs(rec) || rec == page_align(rec)
			      || rec_get_status(rec) & REC_STATUS_NODE_PTR); */
			/* flag this as a GCS table */
			mach_write_to_2(log_ptr, n | 0x8000);
		} else {
			ut_ad(rec == page_align(rec) || !rec_is_gcs(rec));
			mach_write_to_2(log_ptr, n);
		}

		/* For a GCS clustered index, log the number of fields the
		clustered index had before the first ALTER TABLE. */
		if (is_gcs_cluster) {
			log_ptr += 2;
			mach_write_to_2(log_ptr,
					(ulint)index->n_fields_before_alter);

			ut_ad(!index->n_fields_before_alter
			      == !dict_index_is_gcs_clust_after_alter_table(index));
		}

		log_ptr += 2;
		mach_write_to_2(log_ptr,
				dict_index_get_n_unique_in_tree(index));
		log_ptr += 2;
		for (i = 0; i < n; i++) {
			dict_field_t*		field;
			const dict_col_t*	col;
			ulint			len;

			field = dict_index_get_nth_field(index, i);
			col = dict_field_get_col(field);
			len = field->fixed_len;
			ut_ad(len < 0x7fff);
			if (len == 0
			    && (col->len > 255 || col->mtype == DATA_BLOB)) {
				/* variable-length field
				with maximum length > 255 */
				len = 0x7fff;
			}
			if (col->prtype & DATA_NOT_NULL) {
				len |= 0x8000;
			}
			if (log_ptr + 2 > log_end) {
				mlog_close(mtr, log_ptr);
				ut_a(total > (ulint) (log_ptr - log_start));
				total -= log_ptr - log_start;
				alloc = total;
				if (alloc > DYN_ARRAY_DATA_SIZE) {
					alloc = DYN_ARRAY_DATA_SIZE;
				}
				log_start = log_ptr = mlog_open(mtr, alloc);
				if (!log_ptr) {
					return(NULL); /* logging is disabled */
				}
				log_end = log_ptr + alloc;
			}
			mach_write_to_2(log_ptr, len);
			log_ptr += 2;
		}
	}
	if (size == 0) {
		mlog_close(mtr, log_ptr);
		log_ptr = NULL;
	} else if (log_ptr + size > log_end) {
		mlog_close(mtr, log_ptr);
		log_ptr = mlog_open(mtr, size);
	}
	return(log_ptr);
}