Example #1
0
/* 
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		if (!swap_duplicate(entry))
			break;

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-EEXIST) if there is already a page associated
		 * with this entry in the swap cache: added by a racing
		 * read_swap_cache_async, or add_to_swap or shmem_writepage
		 * re-using the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
		if (likely(!err)) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swap_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
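
For readers unfamiliar with the calling convention, here is a minimal sketch of how a swap-in path might use read_swap_cache_async(). It is illustrative only: swapin_sketch() and its simplified error handling are assumptions, not code from the examples in this set; the function merely strings together the lookup, read and wait steps that the comments above describe.

/*
 * Hedged sketch (not from the examples above): a simplified swap-in
 * path. The helper name and error handling are made up for
 * illustration; read_swap_cache_async() is used as documented above.
 */
static struct page *swapin_sketch(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	/* Check the swap cache first, as the comments above recommend. */
	page = lookup_swap_cache(entry);
	if (!page) {
		/* Allocate a page, add it to the swap cache, start the read. */
		page = read_swap_cache_async(entry, gfp_mask, vma, addr);
		if (!page)
			return NULL;	/* OOM, or the entry is no longer in use */
	}

	/* A freshly read page is returned locked; I/O completion unlocks it. */
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);	/* read failed */
		return NULL;
	}
	return page;
}
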
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;
		}

		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			continue;
		}
		if (err) {
			radix_tree_preload_end();
			break;
		}

		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
/* 
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
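
The "call radix_tree_preload() while we can wait" comment above is worth calling out as a pattern of its own. Below is a hedged, generic sketch of that radix_tree_preload() / radix_tree_preload_end() pairing; the tree, lock and item are hypothetical and not taken from the kernel sources quoted here.

/*
 * Hedged sketch of the radix_tree_preload() pattern: preallocate tree
 * nodes while sleeping is still allowed, then do the insertion itself
 * under a spinlock. The tree, lock and item are hypothetical.
 */
static int insert_with_preload(struct radix_tree_root *tree, spinlock_t *lock,
			unsigned long index, void *item)
{
	int err;

	/* May sleep here (GFP_KERNEL); preemption is disabled on return. */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	/* The insertion proper runs in atomic context, using preloaded nodes. */
	spin_lock(lock);
	err = radix_tree_insert(tree, index, item);
	spin_unlock(lock);

	/* Always pair the preload with preload_end(), success or failure. */
	radix_tree_preload_end();
	return err;
}
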
/*
 * zswap_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * This function tries to find a page with the given swap entry
 * in the swapper_space address space (the swap cache).  If the page
 * is found, it is returned in retpage.  Otherwise, a page is allocated,
 * added to the swap cache, and returned in retpage.
 *
 * On success, the swap cache page is returned in retpage
 * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
 * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
 *     the new page is added to swapcache and locked
 * Returns ZSWAP_SWAPCACHE_FAIL on error
 */
static int zswap_get_swap_cache_page(swp_entry_t entry,
				struct page **retpage)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;

	*retpage = NULL;
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page(GFP_KERNEL);
			if (!new_page)
				break; /* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			*retpage = new_page;
			return ZSWAP_SWAPCACHE_NEW;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	if (!found_page)
		return ZSWAP_SWAPCACHE_FAIL;
	*retpage = found_page;
	return ZSWAP_SWAPCACHE_EXIST;
}
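
To make the three return codes documented above concrete, here is a hedged sketch of a hypothetical caller in the style of a writeback path; writeback_sketch() and its fill_page() placeholder are assumptions, not code from this example.

/*
 * Hedged sketch (hypothetical caller): dispatch on the three return
 * codes of zswap_get_swap_cache_page(). fill_page() is a placeholder
 * for whatever decompresses the stored data into the new page.
 */
static int writeback_sketch(swp_entry_t entry,
			void (*fill_page)(struct page *))
{
	struct page *page;

	switch (zswap_get_swap_cache_page(entry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL:
		/* Page allocation failed or the entry is no longer in use. */
		return -ENOMEM;

	case ZSWAP_SWAPCACHE_EXIST:
		/* Someone else brought the page in; drop our reference. */
		page_cache_release(page);
		return -EEXIST;

	case ZSWAP_SWAPCACHE_NEW:
		/* Locked and in the swap cache: fill it, then unlock. */
		fill_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	return -EINVAL;
}
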
Example #5
0
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dpr_err("%s(%p): The SMC PA is already started\n",
			__func__, comm);

		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dpr_err("%s(%p): The L1 structure size is incorrect!\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
		goto error1;
	}

	l1_shared_buffer =
		(struct tf_l1_shared_buffer *)
			internal_get_zeroed_page(GFP_KERNEL);

	if (l1_shared_buffer == NULL) {
		dpr_err("%s(%p): Ouf of memory!\n", __func__, comm);

		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
		__func__, comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
			current->mm);
	pa_info.certificate = (void *) workspace_addr;
	pa_info.parameters = (void *) __pa(l1_shared_buffer);
	pa_info.results = (void *) __pa(l1_shared_buffer);

	l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;

	l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	l1_shared_buffer->workspace_addr = workspace_addr;
	l1_shared_buffer->workspace_size = workspace_size;

	dpr_info("%s(%p): System Configuration (%d bytes)\n",
		__func__, comm, conf_size);
	dpr_info("%s(%p): Starting PA (%d bytes)...\n",
		__func__, comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)l1_shared_buffer,
		(void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(l1_shared_buffer),
		__pa(l1_shared_buffer) + PAGE_SIZE);

	if (pa_size > workspace_size) {
		dpr_err("%s(%p): PA size is incorrect (%x)\n",
			__func__, comm, pa_size);
		ret = -EFAULT;
		goto error1;
	}

	{
		void *tmp;
		tmp = ioremap_nocache(workspace_addr, pa_size);
		if (copy_from_user(tmp, pa_buffer, pa_size)) {
			iounmap(tmp);
			dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
				__func__, comm, (void *) pa_buffer);
			ret = -EFAULT;
			goto error1;
		}
		iounmap(tmp);
	}

	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->l1_buffer = l1_shared_buffer;
	comm->l1_buffer->conf_descriptor = conf_descriptor;
	comm->l1_buffer->conf_offset     = conf_offset;
	comm->l1_buffer->conf_size       = conf_size;
	spin_unlock(&(comm->lock));
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, comm->l1_buffer->rpc_command);

		switch (comm->l1_buffer->rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dpr_info("%s: RPC_CMD_INIT\n", __func__);
			comm->l1_buffer->rpc_status = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			comm->l1_buffer->rpc_status = RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm);
	if (ret != 0) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->l1_buffer;
	comm->l1_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}
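
One step of tf_start() that deserves a closer look is the cache maintenance performed before the secure dispatcher call. The following hedged sketch isolates that pattern; clean_for_secure_world() is a hypothetical helper, but it uses only the dmac_flush_range(), outer_clean_range() and wmb() calls already seen above.

/*
 * Hedged sketch: make a kernel buffer visible to the secure world
 * before an SMC reads it from physical memory. Hypothetical helper
 * built only from the maintenance calls used in tf_start() above.
 */
static void clean_for_secure_world(void *buf, u32 len)
{
	/* Write the L1 data cache lines covering the buffer back to RAM. */
	dmac_flush_range(buf, (void *)((u32)buf + len));

	/* Clean the matching physical range out of the outer (L2) cache. */
	outer_clean_range(__pa(buf), __pa(buf) + len);

	/* Order the cache maintenance before the SMC that consumes it. */
	wmb();
}
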
/* 
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
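
The long -EEXIST comment above describes a general shape: retry a transient conflict, but explicitly yield the CPU so a !CONFIG_PREEMPT kernel cannot busy-loop against the task that holds SWAP_HAS_CACHE while waiting on discard I/O. Below is a hedged, minimal sketch of just that retry shape; the helper name is hypothetical, and only swapcache_prepare() and cond_resched() come from the code above.

/*
 * Hedged sketch of the retry-with-cond_resched() shape described in the
 * -EEXIST comment above. The helper name is hypothetical; only
 * swapcache_prepare() and cond_resched() come from the example.
 */
static int prepare_swapcache_sketch(swp_entry_t entry)
{
	int err;

	for (;;) {
		err = swapcache_prepare(entry);
		if (err != -EEXIST)
			return err;	/* 0 on success, or a hard error */

		/*
		 * Transient: another task owns SWAP_HAS_CACHE but has not
		 * added its page to the swap cache yet. Let it run.
		 */
		cond_resched();
	}
}
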
Example #7
0
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u8 *properties_buffer, u32 properties_length)
{
	struct tf_init_buffer *init_shared_buffer = NULL;
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	u32 l1_shared_buffer_descr;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

	tf_l4sec_clkdm_wakeup(true);

	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	/*
	 * Implementation notes:
	 *
	 * 1/ The PA buffer (pa_buffer) is now owned by this function.
	 *    In case of error, it is responsible for releasing the buffer.
	 *
	 * 2/ The PA Info and PA Buffer will be freed through a RPC call
	 *    at the beginning of the PA entry in the SE.
	 */

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dprintk(KERN_ERR "tf_start(%p): "
			"The SMC PA is already started\n", comm);

		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dprintk(KERN_ERR "tf_start(%p): "
			"The L1 structure size is incorrect!\n", comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dprintk(KERN_ERR "tf_start(%p): "
			"SE initialization failed\n", comm);
		goto error1;
	}

	init_shared_buffer =
		(struct tf_init_buffer *)
			internal_get_zeroed_page(GFP_KERNEL);
	if (init_shared_buffer == NULL) {
		dprintk(KERN_ERR "tf_start(%p): "
			"Ouf of memory!\n", comm);

		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(init_shared_buffer));

	l1_shared_buffer =
		(struct tf_l1_shared_buffer *)
			internal_get_zeroed_page(GFP_KERNEL);

	if (l1_shared_buffer == NULL) {
		dprintk(KERN_ERR "tf_start(%p): "
			"Ouf of memory!\n", comm);

		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dprintk(KERN_INFO "tf_start(%p): "
		"L0SharedBuffer={0x%08x, 0x%08x}\n", comm,
		(u32) init_shared_buffer, (u32) __pa(init_shared_buffer));
	dprintk(KERN_INFO "tf_start(%p): "
		"L1SharedBuffer={0x%08x, 0x%08x}\n", comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
			current->mm);
	l1_shared_buffer_descr = (
		((u32) __pa(l1_shared_buffer) & 0xFFFFF000) |
		(descr & 0xFFF));

	pa_info.certificate = (void *) __pa(pa_buffer);
	pa_info.parameters = (void *) __pa(init_shared_buffer);
	pa_info.results = (void *) __pa(init_shared_buffer);

	init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr;

	init_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	init_shared_buffer->workspace_addr = workspace_addr;
	init_shared_buffer->workspace_size = workspace_size;

	init_shared_buffer->properties_length = properties_length;
	if (properties_length == 0) {
		init_shared_buffer->properties_buffer[0] = 0;
	} else {
		/* Test for overflow */
		if ((init_shared_buffer->properties_buffer +
			properties_length
				> init_shared_buffer->properties_buffer) &&
			(properties_length <=
				init_shared_buffer->properties_length)) {
				memcpy(init_shared_buffer->properties_buffer,
					properties_buffer,
					 properties_length);
		} else {
			dprintk(KERN_INFO "tf_start(%p): "
				"Configuration buffer size from userland is "
				"incorrect(%d, %d)\n",
				comm, (u32) properties_length,
				init_shared_buffer->properties_length);
			ret = -EFAULT;
			goto error1;
		}
	}

	dprintk(KERN_INFO "tf_start(%p): "
		"System Configuration (%d bytes)\n", comm,
		init_shared_buffer->properties_length);
	dprintk(KERN_INFO "tf_start(%p): "
		"Starting PA (%d bytes)...\n", comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)init_shared_buffer,
		(void *)(((u32)init_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(init_shared_buffer),
		__pa(init_shared_buffer) + PAGE_SIZE);

	dmac_flush_range((void *)pa_buffer,
		(void *)(pa_buffer + pa_size));
	outer_clean_range(__pa(pa_buffer),
		__pa(pa_buffer) + pa_size);

	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->init_shared_buffer = init_shared_buffer;
	comm->pBuffer = l1_shared_buffer;
	spin_unlock(&(comm->lock));
	init_shared_buffer = NULL;
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
		disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
			ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dprintk(KERN_INFO "tf_rpc_execute: "
			"Executing CMD=0x%x\n",
			g_RPC_parameters[1]);

		switch (g_RPC_parameters[1]) {
		case RPC_CMD_YIELD:
			dprintk(KERN_INFO "tf_rpc_execute: "
				"RPC_CMD_YIELD\n");
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			g_RPC_parameters[0] = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dprintk(KERN_INFO "tf_rpc_execute: "
				"RPC_CMD_INIT\n");
			g_RPC_parameters[0] = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			g_RPC_parameters[0] = tf_rpc_trace(comm);
			break;

		default:
			g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm, false);
	if (ret != 0) {
		printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
			ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	#if 0
	{
		void *workspace_va;
		workspace_va = ioremap(workspace_addr, workspace_size);
		printk(KERN_INFO
		"Read first word of workspace [0x%x]\n",
		*(uint32_t *)workspace_va);
	}
	#endif

	/* Workaround for issue #6081 */
	if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
		enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
		enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->pBuffer;
	init_shared_buffer = comm->init_shared_buffer;
	comm->pBuffer = NULL;
	comm->init_shared_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (init_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(init_shared_buffer));
		internal_free_page((unsigned long) init_shared_buffer);
	}
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

	tf_l4sec_clkdm_allow_idle(true);

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}
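
Both tf_start() variants share the same CPU-affinity dance: the OMAP4 Secure ROM code may only be entered from CPU0, so the calling task is pinned to CPU0 for the duration and its original mask restored afterwards. A hedged sketch of that pattern follows; run_on_cpu0_sketch() and its callback parameter are hypothetical, while the cpumask and sched_*affinity calls mirror the code above.

/*
 * Hedged sketch of the CPU0 pinning pattern used by both tf_start()
 * variants. The helper and its callback are hypothetical; the affinity
 * calls are the same ones used above.
 */
static int run_on_cpu0_sketch(int (*secure_work)(void *), void *data)
{
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;
	long err;
	int ret;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	err = sched_setaffinity(0, &local_cpu_mask);
	if (err != 0)
		return -EINVAL;

	ret = secure_work(data);

	/* Always restore the caller's affinity, even if the work failed. */
	err = sched_setaffinity(0, &saved_cpu_mask);
	if (err != 0)
		pr_err("could not restore CPU affinity (0x%lX)\n", err);

	return ret;
}
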
Example #8
0
/*
 * zcache_get_swap_cache_page
 *
 * This is an adaptation of read_swap_cache_async()
 *
 * On success, the page is returned in retpage
 * Returns 0 if page was already in the swap cache, page is not locked
 * Returns 1 if the new page needs to be populated, page is locked
 */
static int zcache_get_swap_cache_page(int type, pgoff_t offset,
				struct page *new_page)
{
	struct page *found_page;
	swp_entry_t entry = swp_entry(type, offset);
	int err;

	BUG_ON(new_page == NULL);
	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			return 0;

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) { /* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) { /* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			lru_cache_add_anon(new_page);
			return 1;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		/* FIXME: is it possible to get here without err==-ENOMEM?
		 * If not, we can dispense with the do loop, use goto retry */
	} while (err != -ENOMEM);

	return -ENOMEM;
}
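
For completeness, a hedged sketch of how a caller might act on the 0 / 1 / -ENOMEM return values documented above; the caller name and fill_page() callback are hypothetical, and the page-flag helpers are standard kernel ones rather than code from this example.

/*
 * Hedged sketch (hypothetical caller): act on the return codes of
 * zcache_get_swap_cache_page(). fill_page() is a placeholder for the
 * decompression step that populates the new page.
 */
static int zcache_writeback_sketch(int type, pgoff_t offset,
			struct page *page,
			void (*fill_page)(struct page *))
{
	int ret = zcache_get_swap_cache_page(type, offset, page);

	if (ret < 0)
		return ret;	/* -ENOMEM: nothing was reserved */

	if (ret == 0)
		return 0;	/* already in the swap cache, page not locked */

	/* ret == 1: the new page is locked and in the swap cache. */
	fill_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 1;
}
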