/*
 * Perform a cache maintenance operation on a contiguous system-heap buffer.
 * @heap:   owning heap (unused here, part of the heap-ops interface)
 * @buffer: buffer whose backing memory is operated on
 * @vaddr:  kernel virtual address of the mapped region
 * @offset: byte offset into the buffer
 * @length: number of bytes to operate on
 * @cmd:    one of ION_IOC_CLEAN_CACHES / ION_IOC_INV_CACHES /
 *          ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success, -EINVAL on a failed physical translation or an
 * unknown command.
 */
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long vstart, pstart;

	/*
	 * Validate the translation result BEFORE applying the offset;
	 * otherwise a failed (zero) translation combined with a non-zero
	 * offset would slip past the check below.
	 */
	pstart = virt_to_phys(buffer->priv_virt);
	if (!pstart) {
		WARN(1, "Could not do virt to phys translation on %p\n",
			buffer->priv_virt);
		return -EINVAL;
	}
	pstart += offset;

	vstart = (unsigned long) vaddr;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		clean_caches(vstart, length, pstart);
		break;
	case ION_IOC_INV_CACHES:
		invalidate_caches(vstart, length, pstart);
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		clean_and_invalidate_caches(vstart, length, pstart);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Perform a cache maintenance operation on a content-protect heap buffer.
 * The buffer's physical base is already known (buffer->priv_phys), so no
 * virtual-to-physical translation is required.
 *
 * Returns 0 on success, -EINVAL for an unrecognized command.
 */
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long virt_start = (unsigned long)vaddr;
	unsigned long phys_start = buffer->priv_phys + offset;

	if (cmd == ION_IOC_CLEAN_CACHES)
		clean_caches(virt_start, length, phys_start);
	else if (cmd == ION_IOC_INV_CACHES)
		invalidate_caches(virt_start, length, phys_start);
	else if (cmd == ION_IOC_CLEAN_INV_CACHES)
		clean_and_invalidate_caches(virt_start, length, phys_start);
	else
		return -EINVAL;

	return 0;
}
/* Exemple #3 (0) — scraped-example separator, kept as a comment */
/*
 * Tear down the WebDAV session state so the next operation reconnects
 * and re-authenticates from scratch. Always returns 0.
 */
static int owncloud_commit() {

  clean_caches();

  /* Destroy the neon HTTP session if one is active. */
  if( dav_session.ctx )
    ne_session_destroy( dav_session.ctx );
  dav_session.ctx = 0;

  /* Force dav_connect() to go through the whole neon setup again. */
  _connected = 0;

  /* Release credentials and session bookkeeping. */
  SAFE_FREE( dav_session.user );
  SAFE_FREE( dav_session.pwd );
  SAFE_FREE( dav_session.session_key);
  SAFE_FREE( dav_session.error_string );

  /* Release proxy configuration. */
  SAFE_FREE( dav_session.proxy_type );
  SAFE_FREE( dav_session.proxy_host );
  SAFE_FREE( dav_session.proxy_user );
  SAFE_FREE( dav_session.proxy_pwd  );

  return 0;
}
/*
 * Initialize the power management subsystem.
 *
 * Return value:
 *      -ENODEV: initialization failed
 *      0: success
 */
static int __init msm_pm_init(void)
{
	int ret;
	int val;
	/* Idle/suspend statistics categories registered with
	 * msm_pm_add_stats() at the end of init. */
	enum msm_pm_time_stats_id enable_stats[] = {
		MSM_PM_STAT_REQUESTED_IDLE,
		MSM_PM_STAT_IDLE_SPIN,
		MSM_PM_STAT_IDLE_WFI,
		MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
		MSM_PM_STAT_SUSPEND,
		MSM_PM_STAT_FAILED_SUSPEND,
		MSM_PM_STAT_NOT_IDLE,
	};

#ifdef CONFIG_CPU_V7
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned long exit_phys;

	/* Physical address of the warm-boot resume entry point. */
	exit_phys = virt_to_phys(msm_pm_collapse_exit);

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;
	/* Build a 1:1 section mapping covering msm_pm_collapse_exit so a
	 * resuming core can enable the MMU while still running from
	 * physical addresses. */
	pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
			 exit_phys);
	pmdval = (exit_phys & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	/* Physically contiguous, unmapped scratch area where each CPU
	 * saves its state across power collapse.
	 * NOTE(review): pc_pgd (and msm_saved_state_phys on the second
	 * failure) are not freed on the -ENOMEM paths below — confirm
	 * whether cleanup matters for an __init-time failure. */
	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	/* Push the pgd pointer out to memory so a core waking with caches
	 * disabled reads the current value. */
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		     virt_to_phys(&msm_pm_pc_pgd));
#endif

	/* Shared-memory region used to exchange power-management state
	 * with the modem (DEM slave protocol). */
	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
		sizeof(*msm_pm_smem_data));
	if (msm_pm_smem_data == NULL) {
		printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
		return -ENODEV;
	}

	ret = msm_timer_init_time_sync(msm_pm_timeout);
	if (ret)
		return ret;

	/* Clear the SMSM interrupt mask so all power-master state changes
	 * from the modem reach us. */
	ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	if (cpu_is_msm8625()) {
		target_type = TARGET_IS_8625;
		/* Make the target flag visible to cores running with
		 * caches off during resume. */
		clean_caches((unsigned long)&target_type, sizeof(target_type),
				virt_to_phys(&target_type));

		/*
		 * Configure the MPA5_GDFS_CNT_VAL register for
		 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
		 * DBGNOPOWERDN for each cpu.
		 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
		 * GDFS control.
		 */
		val = 0x00030002;
		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));

		l2x0_base_addr = MSM_L2CC_BASE;
	}

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	/* The wakeup_reason field is overloaded during initialization time
	   to signal Modem that Apps will control the low power modes of
	   the memory.
	 */
	msm_pm_smem_data->wakeup_reason = 1;
	smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
#endif

	/* Mode table must have been registered by board code before init. */
	BUG_ON(msm_pm_modes == NULL);

	suspend_set_ops(&msm_pm_ops);

	/* Expose sysfs controls and the statistics selected above. */
	msm_pm_mode_sysfs_add();
	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));

	atomic_set(&msm_pm_init_done, 1);
	return 0;
}
/* Exemple #5 (0) — scraped-example separator, kept as a comment */
/*
 * ddl_device_init() - One-time initialization of the DDL video-core driver.
 * @ddl_init_config: caller-supplied callback, interrupt-clear hook and the
 *                   core's virtual base address; callback and base address
 *                   are mandatory.
 * @client_data:     opaque cookie handed back to the client in callbacks.
 *
 * Claims the global DDL context, allocates (or adopts) the firmware/context
 * memory region — pmem on SMI targets, the pre-reserved firmware region on
 * EBI1 — loads the firmware and starts core DMA initialization.
 *
 * Return: VCD_S_SUCCESS on success; VCD_ERR_ILLEGAL_PARM, VCD_ERR_ILLEGAL_OP,
 *         VCD_ERR_BUSY or VCD_ERR_ALLOC_FAIL on failure. On failure the
 *         context buffers are released and the context returns to idle.
 */
u32 ddl_device_init(struct ddl_init_config *ddl_init_config,
	void *client_data)
{
	struct ddl_context *ddl_context;
	struct res_trk_firmware_addr firmware_addr;
	u32 status = VCD_S_SUCCESS, memorytype = PMEM_MEMTYPE;
	void *ptr = NULL;
	DDL_MSG_HIGH("ddl_device_init");

	if ((!ddl_init_config) || (!ddl_init_config->ddl_callback) ||
		(!ddl_init_config->core_virtual_base_addr)) {
		DDL_MSG_ERROR("ddl_dev_init:Bad_argument");
		return VCD_ERR_ILLEGAL_PARM;
	}
	ddl_context = ddl_get_context();
	if (DDL_IS_INITIALIZED(ddl_context)) {
		DDL_MSG_ERROR("ddl_dev_init:Multiple_init");
		return VCD_ERR_ILLEGAL_OP;
	}
	if (!DDL_IS_IDLE(ddl_context)) {
		DDL_MSG_ERROR("ddl_dev_init:Ddl_busy");
		return VCD_ERR_BUSY;
	}
	/* Claim the context: zero it and mark it busy for the duration. */
	memset(ddl_context, 0, sizeof(struct ddl_context));
	DDL_BUSY(ddl_context);
	ddl_context->ddl_callback = ddl_init_config->ddl_callback;
	if (ddl_init_config->interrupt_clr)
		ddl_context->interrupt_clr =
			ddl_init_config->interrupt_clr;
	ddl_context->core_virtual_base_addr =
		ddl_init_config->core_virtual_base_addr;
	ddl_context->client_data = client_data;
	ddl_context->ddl_hw_response.arg1 = DDL_INVALID_INTR_STATUS;

	ddl_context->frame_channel_depth = VCD_FRAME_COMMAND_DEPTH;

	/* NOTE(review): casting a pointer to u32 for logging truncates on
	 * 64-bit builds — harmless here but worth confirming. */
	DDL_MSG_LOW("%s() : virtual address of core(%x)\n", __func__,
		(u32) ddl_init_config->core_virtual_base_addr);
	vidc_1080p_set_device_base_addr(
		ddl_context->core_virtual_base_addr);
	ddl_context->cmd_state = DDL_CMD_INVALID;
	ddl_client_transact(DDL_INIT_CLIENTS, NULL);
	ddl_context->fw_memory_size =
		DDL_FW_INST_GLOBAL_CONTEXT_SPACE_SIZE;
	if (memorytype == PMEM_MEMTYPE_SMI) {
		/* SMI target: allocate the firmware region from pmem with
		 * the 128 KB alignment the core requires. */
		ptr = ddl_pmem_alloc(&ddl_context->dram_base_a,
			ddl_context->fw_memory_size, DDL_KILO_BYTE(128));
	} else {
		/* EBI1 target: adopt the pre-reserved firmware region if it
		 * is large enough and properly aligned; otherwise fall
		 * through with ptr == NULL and fail below. */
		if (!res_trk_get_firmware_addr(&firmware_addr) &&
		   firmware_addr.buf_size >= ddl_context->fw_memory_size) {
			if (DDL_ADDR_IS_ALIGNED(firmware_addr.device_addr,
				DDL_KILO_BYTE(128))) {
				ptr = (void *) firmware_addr.base_addr;
				ddl_context->dram_base_a.physical_base_addr =
				ddl_context->dram_base_a.align_physical_addr =
					(u8 *)firmware_addr.device_addr;
				ddl_context->dram_base_a.align_virtual_addr  =
				ddl_context->dram_base_a.virtual_base_addr =
					firmware_addr.base_addr;
				ddl_context->dram_base_a.buffer_size =
					ddl_context->fw_memory_size;
			} else {
				DDL_MSG_ERROR("firmware base not aligned %p",
					(void *)firmware_addr.device_addr);
			}
		}
	}
	if (!ptr) {
		/* Fixed typo in the error message ("Aocation"). */
		DDL_MSG_ERROR("Memory Allocation Failed for FW Base");
		status = VCD_ERR_ALLOC_FAIL;
	} else {
		DDL_MSG_LOW("%s() : physical address of base(%x)\n",
			 __func__, (u32) ddl_context->dram_base_a.\
			align_physical_addr);
		/* Base B aliases base A; both views point at the same
		 * firmware region. */
		ddl_context->dram_base_b.align_physical_addr =
			ddl_context->dram_base_a.align_physical_addr;
		ddl_context->dram_base_b.align_virtual_addr  =
			ddl_context->dram_base_a.align_virtual_addr;
	}
	if (!status) {
		ptr = ddl_pmem_alloc(&ddl_context->metadata_shared_input,
			DDL_METADATA_TOTAL_INPUTBUFSIZE,
			DDL_LINEAR_BUFFER_ALIGN_BYTES);
		if (!ptr) {
			DDL_MSG_ERROR("ddl_device_init: metadata alloc fail");
			status = VCD_ERR_ALLOC_FAIL;
		}
	}
	if (!status && !ddl_fw_init(&ddl_context->dram_base_a)) {
		DDL_MSG_ERROR("ddl_dev_init:fw_init_failed");
		status = VCD_ERR_ALLOC_FAIL;
	}
	/* On EBI1 the firmware was written through a cached mapping; push
	 * it to memory so the core sees the loaded image. (firmware_addr
	 * is only valid here because status == 0 implies the EBI1 adopt
	 * path above succeeded.) */
	if (!status && memorytype == PMEM_MEMTYPE_EBI1)
		clean_caches((unsigned long)firmware_addr.base_addr,
		firmware_addr.buf_size,	firmware_addr.device_addr);

	if (!status) {
		ddl_context->cmd_state = DDL_CMD_DMA_INIT;
		ddl_vidc_core_init(ddl_context);
	} else {
		/* Failure: release anything allocated and return to idle. */
		ddl_release_context_buffers(ddl_context);
		DDL_IDLE(ddl_context);
	}
	return status;
}