void sky_sys_rst_NotiToMARM(uint32_t *reason)
{
  struct msm_rpc_client *client;
  static struct oem_rapi_client_streaming_func_arg arg;
  static struct oem_rapi_client_streaming_func_ret ret;
    
  client = oem_rapi_client_init();
  
  if(IS_ERR(client))
  {
    printk(KERN_INFO "[PANTECH] OEM RAPI Error");  
  }
  else
  {
    if(smem_id_vendor0_ptr == NULL)
    {
      smem_id_vendor0_ptr = (smem_id_vendor0_type*)smem_alloc(SMEM_ID_VENDOR0, sizeof(smem_id_vendor0_type));
    }   

    arg.event = 62; //OEM_RAPI_CLIENT_EVENT_SKY_AARM_ERR_LOGGING
    arg.cb_func = 0;
    arg.handle = 0;
    arg.in_len = sizeof(uint32_t);
    arg.input = (char *)reason;
    arg.out_len_valid = 1;
    arg.output_valid = 1;
    arg.output_size = 128;

    ret.out_len = NULL;
    ret.output = NULL;

    oem_rapi_client_streaming_function(client, &arg, &ret);

    if(smem_id_vendor0_ptr != NULL)
    {
      smem_id_vendor0_ptr->reset = SW_RESET;
      if(reason)
        smem_id_vendor0_ptr->reason = *reason;
      else
        smem_id_vendor0_ptr->reason = 0x00000000;
    }
  }
}
Example #2
int __init rmt_storage_add_ramfs(void)
{
        struct shared_ramfs_table *ramfs_table;
        struct shared_ramfs_entry *ramfs_entry;
        int index;

        ramfs_table = smem_alloc(SMEM_SEFS_INFO,
                        sizeof(struct shared_ramfs_table));

        if (!ramfs_table) {
                printk(KERN_WARNING "%s: No RAMFS table in SMEM\n", __func__);
                return -ENOENT;
        }

        if ((ramfs_table->magic_id != (u32) RAMFS_INFO_MAGICNUMBER) ||
                (ramfs_table->version != (u32) RAMFS_INFO_VERSION)) {
                printk(KERN_WARNING "%s: Magic / Version mismatch:, "
                       "magic_id=%#x, format_version=%#x\n", __func__,
                       ramfs_table->magic_id, ramfs_table->version);
                return -ENOENT;
        }

	for (index = 0; index < ramfs_table->entries; index++) {
                ramfs_entry = &ramfs_table->ramfs_entry[index];

                /* Find a match for the Modem Storage RAMFS area */
                if (ramfs_entry->client_id == (u32) RAMFS_MODEMSTORAGE_ID) {
                        printk(KERN_INFO "%s: RAMFS Info (from SMEM): "
                                "Baseaddr = 0x%08x, Size = 0x%08x\n", __func__,
                                ramfs_entry->base_addr, ramfs_entry->size);

                        rmt_storage_resources[0].start = ramfs_entry->base_addr;
                        rmt_storage_resources[0].end = ramfs_entry->base_addr +
                                                        ramfs_entry->size - 1;
                        msm_register_device(&rmt_storage_device, ramfs_entry);
                        return 0;
                }
        }
        return -ENOENT;
}
int sky_sys_rst_SetLcdBLStatus(uint32_t eBrightness)
{
  int r;	
  uint32_t type; 

  if(smem_id_vendor0_ptr == NULL)
  {
    smem_id_vendor0_ptr = (smem_id_vendor0_type*)smem_alloc(SMEM_ID_VENDOR0, sizeof(smem_id_vendor0_type));
  } 

  type = SMEM_PROC_COMM_CUSTOMER_CMD1_SET_BL_STATUS;
  r = msm_proc_comm(PCOM_CUSTOMER_CMD1, &type, &eBrightness);
  if(r >= 0 && smem_id_vendor0_ptr != NULL)
  {
    if(!eBrightness) {
      smem_id_vendor0_ptr->backlight = RST_LCD_BL_OFF;//MAIN_LCD_BL_OFF;	
    } else {
      smem_id_vendor0_ptr->backlight = RST_LCD_BL_ON;//MAIN_LCD_BL_ON;
    }
  }
  return r;
}
static void msm_gpio_sleep_int(unsigned long arg)
{
	int i, j;
	struct tramp_gpio_smem *smem_gpio;

	BUILD_BUG_ON(NR_GPIO_IRQS > NUM_GPIO_SMEM_BANKS * 32);

	smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));
	if (smem_gpio == NULL)
		return;

	local_irq_disable();
	for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) {
		int count = smem_gpio->num_fired[i];
		for (j = 0; j < count; j++) {
			/* TODO: Check mask */
			generic_handle_irq(
				MSM_GPIO_TO_INT(smem_gpio->fired[i][j]));
		}
	}
	local_irq_enable();
}
int sky_sys_rst_SetUserReset(uint32_t *reason)
{
  int r;	
  uint32_t type; 

  if(smem_id_vendor0_ptr == NULL)
  {
    smem_id_vendor0_ptr = (smem_id_vendor0_type*)smem_alloc(SMEM_ID_VENDOR0, sizeof(smem_id_vendor0_type));
  } 

  type = SMEM_PROC_COMM_CUSTOMER_CMD1_SET_USER_RESET;
  r = msm_proc_comm(PCOM_CUSTOMER_CMD1, &type, reason);
  if(r >= 0 && smem_id_vendor0_ptr != NULL)
  {
    smem_id_vendor0_ptr->reset = USER_RESET;	
    if(reason)
      smem_id_vendor0_ptr->reason = *reason;
    else
      smem_id_vendor0_ptr->reason = 0x00000000;
  }
  return r;
}
int msm_gpio_exit_sleep(void)
{
	int i, j, r, mask;
	unsigned val;
	struct tramp_gpio_smem *smem_gpio;

	smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));

	r = 0;
	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
		__raw_writel(msm_gpio_chips[i].int_enable[0],
		       msm_gpio_chips[i].regs.int_en);
		val = __raw_readl(msm_chip->regs.int_status);
		val &= msm_chip->int_enable[0];
		while (val) {
			mask = val & -val;
			j = fls(mask) - 1;
			pr_info("%s %08x %08x bit %d gpio %d irq %d\n",
					__func__, val, mask, j,
					msm_chip->chip.base + j,
					FIRST_GPIO_IRQ +
					msm_chip->chip.base + j);
			val &= ~mask;
			r++;
		}
	}
	mb();

	if (smem_gpio && (smem_gpio->num_fired[0] || smem_gpio->num_fired[1])) {
		if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP)
			printk(KERN_INFO "gpio: fired %x %x\n",
			      smem_gpio->num_fired[0], smem_gpio->num_fired[1]);
		tasklet_schedule(&msm_gpio_sleep_int_tasklet);
	}

	return r;
}
void __init acer_ts_init(void)
{
	int ret = 0;
	struct kobject *properties_kobj;
	acer_smem_flag_t *acer_smem_flag;

	acer_smem_flag = (acer_smem_flag_t *)(smem_alloc(SMEM_ID_VENDOR0, sizeof(acer_smem_flag_t)));
	if (acer_smem_flag != NULL && acer_smem_flag->acer_hw_version < ACER_HW_VERSION_PVT)
		a4_ts_board_info[0].irq = MSM_GPIO_TO_INT(A4_GPIO_CYP_TP_IRQ1);
	else
		a4_ts_board_info[0].irq = MSM_GPIO_TO_INT(A4_GPIO_CYP_TP_IRQ2);

	i2c_register_board_info(0, a4_ts_board_info,
			ARRAY_SIZE(a4_ts_board_info));

	properties_kobj = kobject_create_and_add("board_properties", NULL);
	if (properties_kobj)
		ret = sysfs_create_group(properties_kobj,
				&a4_properties_attr_group);

	if (!properties_kobj || ret)
		pr_err("failed to create board_properties\n");
}
int _remote_spin_lock_init(remote_spin_lock_id_t id, _remote_spinlock_t *lock)
{
	_remote_spinlock_t spinlock_start;

	/* The raw_spinlock_t structure should be the same as
	 * raw_remote_spinlock_t to be able to reuse the __raw_spin_lock()
	 * and __raw_spin_unlock() functions. If this condition is not met,
	 * then please write new code to replace calls to __raw_spin_lock()
	 * and __raw_spin_unlock(). */
	BUILD_BUG_ON(sizeof(raw_remote_spinlock_t) != sizeof(raw_spinlock_t));

	if (id >= SMEM_SPINLOCK_COUNT)
		return -EINVAL;

	spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
				    SMEM_SPINLOCK_ARRAY_SIZE);
	if (spinlock_start == NULL)
		return -ENXIO;

	*lock = spinlock_start + id;

	return 0;
}
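
A minimal usage sketch of the lock returned above (not from the original source). Per the comment in _remote_spin_lock_init(), raw_remote_spinlock_t is laid out like raw_spinlock_t, so the handle is assumed to be usable with __raw_spin_lock()/__raw_spin_unlock(); DEMO_SPINLOCK_ID is a hypothetical lock index.

static _remote_spinlock_t demo_lock;

static int demo_remote_lock_usage(void)
{
	/* DEMO_SPINLOCK_ID is a placeholder; it must be < SMEM_SPINLOCK_COUNT */
	int rc = _remote_spin_lock_init(DEMO_SPINLOCK_ID, &demo_lock);

	if (rc)
		return rc;

	/* Reuse the raw spinlock helpers, as the comment above suggests. */
	__raw_spin_lock((raw_spinlock_t *)demo_lock);
	/* ... critical section shared with the remote processor ... */
	__raw_spin_unlock((raw_spinlock_t *)demo_lock);

	return 0;
}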
Example #9
int msm8x25_init(void)
{
    DUMP_BUILD_TIME;//LIDBG_MODULE_LOG;

    lidbg( "smem_alloc id = %d\n", SMEM_ID_VENDOR0);
    p_fly_smem = (struct fly_smem *)smem_alloc(SMEM_ID_VENDOR0, sizeof(struct fly_smem));

    if (p_fly_smem == NULL)
    {
        lidbg( "smem_alloc fail,kmalloc mem!\n");
        p_fly_smem = (struct fly_smem *)kmalloc(sizeof(struct fly_smem), GFP_KERNEL);
        if(p_fly_smem == NULL)
        {
            LIDBG_ERR("<err.register_wakelock:kzalloc.name>\n");
        }
        memset(p_fly_smem, 0, sizeof(struct fly_smem));
    }

    soc_bl_init();
    soc_io_init();
    soc_ad_init();

    return 0;
}
Example #10
/**
 * max8649_probe - probe the max8649 driver
 * @client: Pointer to the i2c_client structure
 * @dev_id: Pointer to the i2c_device_id structure
 *
 * Returns negative errno, else zero on success.
 */
static int max8649_probe(struct i2c_client *client,
        const struct i2c_device_id *dev_id)
{
    unsigned char *ic_ver;
    
    if(!client){
        printk(KERN_ERR "MAX8649 Invalid i2c client\n");
        return -EINVAL;
    }

    if (!i2c_check_functionality(client->adapter,
                I2C_FUNC_SMBUS_BYTE_DATA)) {
        printk(KERN_ERR "MAX8649 does not support SMBUS_BYTE_DATA.\n");
        return -EINVAL;
    }

    maxclient = client;

    ic_ver = smem_alloc(SMEM_OEM_028,sizeof(char));
    if(ic_ver == NULL){
        printk(KERN_ERR "smem_alloc Err.\n");
        return -EINVAL;
    }
    else if(*ic_ver == IC_MAX8649){
        max8649_set_volts();
    }
    else if(*ic_ver == IC_MAX8952){
        max8952_set_volts();
    }
    else{
        printk(KERN_ERR "MAX8649 unknown IC version: 0x%x\n", *ic_ver);
        return -EINVAL;
    }

    return 0;
}
/*
 * Initialize the power management subsystem.
 *
 * Return value:
 *      -ENODEV: initialization failed
 *      0: success
 */
static int __init msm_pm_init(void)
{
	int ret;
	int val;
	enum msm_pm_time_stats_id enable_stats[] = {
		MSM_PM_STAT_REQUESTED_IDLE,
		MSM_PM_STAT_IDLE_SPIN,
		MSM_PM_STAT_IDLE_WFI,
		MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_POWER_COLLAPSE,
		MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
		MSM_PM_STAT_SUSPEND,
		MSM_PM_STAT_FAILED_SUSPEND,
		MSM_PM_STAT_NOT_IDLE,
	};

#ifdef CONFIG_CPU_V7
	pgd_t *pc_pgd;
	pmd_t *pmd;
	unsigned long pmdval;
	unsigned long exit_phys;

	exit_phys = virt_to_phys(msm_pm_collapse_exit);

	/* Page table for cores to come back up safely. */
	pc_pgd = pgd_alloc(&init_mm);
	if (!pc_pgd)
		return -ENOMEM;
	pmd = pmd_offset(pud_offset(pc_pgd + pgd_index(exit_phys), exit_phys),
			 exit_phys);
	pmdval = (exit_phys & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	pmd[0] = __pmd(pmdval);
	pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));

	msm_saved_state_phys =
		allocate_contiguous_ebi_nomap(CPU_SAVED_STATE_SIZE *
					      num_possible_cpus(), 4);
	if (!msm_saved_state_phys)
		return -ENOMEM;
	msm_saved_state = ioremap_nocache(msm_saved_state_phys,
					  CPU_SAVED_STATE_SIZE *
					  num_possible_cpus());
	if (!msm_saved_state)
		return -ENOMEM;

	/* It is remotely possible that the code in msm_pm_collapse_exit()
	 * which turns on the MMU with this mapping is in the
	 * next even-numbered megabyte beyond the
	 * start of msm_pm_collapse_exit().
	 * Map this megabyte in as well.
	 */
	pmd[2] = __pmd(pmdval + (2 << (PGDIR_SHIFT - 1)));
	flush_pmd_entry(pmd);
	msm_pm_pc_pgd = virt_to_phys(pc_pgd);
	clean_caches((unsigned long)&msm_pm_pc_pgd, sizeof(msm_pm_pc_pgd),
		     virt_to_phys(&msm_pm_pc_pgd));
#endif

	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
		sizeof(*msm_pm_smem_data));
	if (msm_pm_smem_data == NULL) {
		printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
		return -ENODEV;
	}

	ret = msm_timer_init_time_sync(msm_pm_timeout);
	if (ret)
		return ret;

	ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
	if (ret) {
		printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
			__func__, ret);
		return ret;
	}

	if (cpu_is_msm8625()) {
		target_type = TARGET_IS_8625;
		clean_caches((unsigned long)&target_type, sizeof(target_type),
				virt_to_phys(&target_type));

		/*
		 * Configure the MPA5_GDFS_CNT_VAL register for
		 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
		 * DBGNOPOWERDN for each cpu.
		 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
		 * GDFS control.
		 */
		val = 0x00030002;
		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));

		l2x0_base_addr = MSM_L2CC_BASE;
	}

#ifdef CONFIG_MSM_MEMORY_LOW_POWER_MODE
	/* The wakeup_reason field is overloaded during initialization time
	   to signal Modem that Apps will control the low power modes of
	   the memory.
	 */
	msm_pm_smem_data->wakeup_reason = 1;
	smsm_change_state(SMSM_APPS_DEM, 0, DEM_SLAVE_SMSM_RUN);
#endif

	BUG_ON(msm_pm_modes == NULL);

	suspend_set_ops(&msm_pm_ops);

	msm_pm_mode_sysfs_add();
	msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));

	atomic_set(&msm_pm_init_done, 1);
	return 0;
}
void msm_gpio_enter_sleep(int from_idle)
{
	int i;
	struct tramp_gpio_smem *smem_gpio;

	smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));

	if (smem_gpio) {
		for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
			smem_gpio->enabled[i] = 0;
			smem_gpio->detection[i] = 0;
			smem_gpio->polarity[i] = 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		__raw_writel(msm_gpio_chips[i].int_enable[!from_idle],
		       msm_gpio_chips[i].regs.int_en);
		if (smem_gpio) {
			uint32_t tmp;
			int start, index, shiftl, shiftr;
			start = msm_gpio_chips[i].chip.base;
			index = start / 32;
			shiftl = start % 32;
			shiftr = 32 - shiftl;
			tmp = msm_gpio_chips[i].int_enable[!from_idle];
			smem_gpio->enabled[index] |= tmp << shiftl;
			smem_gpio->enabled[index+1] |= tmp >> shiftr;
			smem_gpio->detection[index] |=
				__raw_readl(msm_gpio_chips[i].regs.int_edge) <<
				shiftl;
			smem_gpio->detection[index+1] |=
				__raw_readl(msm_gpio_chips[i].regs.int_edge) >>
				shiftr;
			smem_gpio->polarity[index] |=
				__raw_readl(msm_gpio_chips[i].regs.int_pos) <<
				shiftl;
			smem_gpio->polarity[index+1] |=
				__raw_readl(msm_gpio_chips[i].regs.int_pos) >>
				shiftr;
		}
	}
	mb();

	if (smem_gpio) {
		if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP)
			for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
				printk("msm_gpio_enter_sleep gpio %d-%d: enable"
				       " %08x, edge %08x, polarity %08x\n",
				       i * 32, i * 32 + 31,
				       smem_gpio->enabled[i],
				       smem_gpio->detection[i],
				       smem_gpio->polarity[i]);
			}
		for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
			smem_gpio->num_fired[i] = 0;
	}
	msm_gpio_show_suspend_state(ldo_gpios0,
			ARRAY_SIZE(ldo_gpios0), &msm_gpio_chips[0]);

	msm_gpio_show_suspend_state(ldo_gpios1,
			ARRAY_SIZE(ldo_gpios1), &msm_gpio_chips[1]);

	msm_gpio_show_suspend_state(ldo_gpios2,
			ARRAY_SIZE(ldo_gpios2), &msm_gpio_chips[2]);

	msm_gpio_show_suspend_state(ldo_gpios3,
			ARRAY_SIZE(ldo_gpios3), &msm_gpio_chips[3]);

	msm_gpio_show_suspend_state(ldo_gpios4,
			ARRAY_SIZE(ldo_gpios4), &msm_gpio_chips[4]);

	msm_gpio_show_suspend_state(ldo_gpios5,
			ARRAY_SIZE(ldo_gpios5), &msm_gpio_chips[5]);

	msm_gpio_show_suspend_state(ldo_gpios6,
			ARRAY_SIZE(ldo_gpios6), &msm_gpio_chips[6]);

	msm_gpio_show_suspend_state(ldo_gpios7,
			ARRAY_SIZE(ldo_gpios7), &msm_gpio_chips[7]);
}
Example #13
//---------------------------------------------------------------------
// cam12mp_sensor_config()
//---------------------------------------------------------------------
int cam12mp_sensor_config(void __user *argp)
{
extern void adxl345_stop_ap(void);
extern void adxl345_start_ap(void);

    struct CameraSensorI2CCmdType   I2CCmd;
    struct sensor_cfg_data cfg;
    uint32_t *smem_ptr = NULL;
    int   rc = 0;

    if (copy_from_user(&cfg, (void *)argp, sizeof(struct sensor_cfg_data)))
        return -EFAULT;

    mutex_lock(&cam12mp_mtx);
    switch (cfg.cfgtype) {
    case CFG_PWR_UP:
        adxl345_stop_ap();
        rc = cam12mp_dl(&cfg.cfg.dl);
        adxl345_start_ap();
        break;

    case CFG_GET_TEMP:
        smem_ptr = (uint32_t *)smem_alloc(SMEM_OEM_013, sizeof(int)); 
        if(smem_ptr == NULL){
            LOGE("+%s (CFG_GET_TEMP)\n", __func__);
            rc = -EINVAL;
        } else {
            cfg.cfg.temp = *smem_ptr;
            if (copy_to_user((void *)argp, &cfg, sizeof(struct sensor_cfg_data)))
                rc = -EFAULT;
        }
        break;

    case CFG_SET_LED:
        cam12mp_led_control(cfg.cfg.led);
        break;

    case CFG_COMMAND:
        if (!cfg.cfg.cmd.txlen || cfg.cfg.cmd.txlen > 16 || cfg.cfg.cmd.rxlen > 64) {
            LOGI("+%s (%d)\n", __func__,cfg.cfgtype);
            rc = -EINVAL;
            break;
        }
        _I2C_LOG_ = cfg.rs;
        I2CCmd.slave_addr = 0x78;
        I2CCmd.pwdata     = cfg.cfg.cmd.tx;
        I2CCmd.wlen       = cfg.cfg.cmd.txlen;
        I2CCmd.prdata     = cfg.cfg.cmd.rx;
        I2CCmd.rlen       = cfg.cfg.cmd.rxlen;
        if (!cfg.cfg.cmd.rxlen)
            rc = camsensor_gpioi2c_write(&I2CCmd);
        else
            rc = camsensor_gpioi2c_read(&I2CCmd);

        if (!rc)
            if (copy_to_user((void *)argp, &cfg, sizeof(struct sensor_cfg_data)))
                rc = -EFAULT;
        _I2C_LOG_ = 1;
        break;

    default:
        LOGI("+%s (%d)\n", __func__,cfg.cfgtype);
        rc = -EINVAL;
        break;
    }
    mutex_unlock(&cam12mp_mtx);

    if (rc) LOGI("-%s Done.(%d)\n", __func__, rc);
    return rc;
}
Example #14
static ssize_t misc_helper_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos)
{
	unsigned char cmd[64];
	int len;
	int i, r;
	unsigned int id, val;
	unsigned char *uchar;
	char *soc_baseband_id, *args[10]={0};

	static char bp_version[32]={0};
	static int  bp_taken=0;
	static smem_mot_vendor1_type *vendor1;

	if (count < 1)
		return 0;

	len = count > 63 ? 63 : count;

	if (copy_from_user(cmd, buf, len))
		return -EFAULT;

	cmd[len] = 0;

	/* lazy */
	if (cmd[len-1] == '\n') {
		cmd[len-1] = 0;
		len--;
	}

	if ((r = misc_helper_args (args, cmd, len)) > 0) {
		for (i=0; i < r; i++)
			D(KERN_ERR "ARG[%d] %s\n", i, args[i]);
	}

    if (r == 0)
        return count;

	switch (misc_helper_get_cmd (args[0], r))
	 {
	 case CMD_BP_VER :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		if (! bp_taken) {
			soc_baseband_id = socinfo_get_baseband_id();
			if (soc_baseband_id) {
				strncpy (bp_version, soc_baseband_id, sizeof (bp_version)-1);
				bp_version[31] = 0;
			}else
				strcpy (bp_version, "undefined");
			bp_taken = 1;
		}
		D("%s(): %s\n", misc_helper_cmd_name[CMD_BP_VER], bp_version);
		sprintf (misc_helper_info.data.buffer, "%s", bp_version);
			break;

	 case CMD_AP_FLASH :
		if(!0) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = PROCCOMM_MODEM_SET_AP_FLASH_REASON;
			val = 0;
			r = meta_proc(id, &val);
			D("%s()=%08x,%08x rc=%d\n", misc_helper_cmd_name[CMD_AP_FLASH], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_UUID_HIGH :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = 0;
		val = 0;
		r = msm_proc_comm (PCOM_GET_UUID_HIGH, &id, &val);
		D("%s()=%08x,%08x rc=%d\n", misc_helper_cmd_name[CMD_UUID_HIGH], id, val, r);
		uchar = (unsigned char *)&id;
		sprintf (misc_helper_info.data.buffer, "%02x %02x %02x %02x ", uchar[0], uchar[1], uchar[2], uchar[3]);
		uchar = (unsigned char *)&val;
		sprintf (misc_helper_info.data.buffer+strlen(misc_helper_info.data.buffer),
			 "%02x %02x %02x %02x", uchar[0], uchar[1], uchar[2], uchar[3]);
			break;

	 case CMD_UUID_LOW :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = 0;
		val = 0;
		r = msm_proc_comm (PCOM_GET_UUID_LOW, &id, &val);
		D("%s()=%08x,%08x rc=%d\n", misc_helper_cmd_name[CMD_UUID_LOW], id, val, r);
		uchar = (unsigned char *)&id;
		sprintf (misc_helper_info.data.buffer, "%02x %02x %02x %02x ", uchar[0], uchar[1], uchar[2], uchar[3]);
		uchar = (unsigned char *)&val;
		sprintf (misc_helper_info.data.buffer+strlen(misc_helper_info.data.buffer),
			 "%02x %02x %02x %02x", uchar[0], uchar[1], uchar[2], uchar[3]);
			break;

	 case CMD_HELP :
		if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			misc_helper_usage (args, r);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_GPIO_GET :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = (unsigned)simple_strtoul (args[1], NULL, 10);
		val = gpio_get_value (id);
		D("%s(%u)=%u\n", misc_helper_cmd_name[CMD_GPIO_GET], id, val);
		sprintf (misc_helper_info.data.buffer, "%u", val);
			break;

	 case CMD_BATTERY :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = 0;
		val = 0;
		r = msm_proc_comm (PCOM_GET_BATT_LEVEL, &id, &val);
		D("%s()=%u,%u rc=%d\n", misc_helper_cmd_name[CMD_BATTERY], id, val, r);
		sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
			break;

	 case CMD_CHARGING :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = 0;
		val = 0;
		r = msm_proc_comm (PCOM_CHG_IS_CHARGING, &id, &val);
		D("%s()=%u,%u rc=%d\n", misc_helper_cmd_name[CMD_CHARGING], id, val, r);
		sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
			break;

	 case CMD_RESET :
		if(!0) { //if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = 0;
			val = 0;
			r = msm_proc_comm (PCOM_RESET_MODEM, &id, &val);
			D("%s()=%u,%u rc=%d\n", misc_helper_cmd_name[CMD_RESET], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_CHIP_PWRDN :
		if(!0) { //if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = 0;
			val = 0;
			r = msm_proc_comm (PCOM_POWER_DOWN, &id, &val);
			D("%s()=%u,%u rc=%d\n", misc_helper_cmd_name[CMD_CHIP_PWRDN], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_NVREAD :
		if (!0) { //if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = (unsigned)simple_strtoul (args[1], NULL, 10);
			id = (id << 8) | PROCCOMM_NV_READ;
			val = 0;
			r = msm_proc_comm (PCOM_CUSTOMER_CMD3, &id, &val);
			D("%s(0x%08x)=%u rc=%d\n", misc_helper_cmd_name[CMD_NVREAD], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_NVWRITE :
		if (!0) { //if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = (unsigned)simple_strtoul (args[1], NULL, 10);
			id = (id << 8) | PROCCOMM_NV_WRITE;
			val = (unsigned)simple_strtoul (args[2], NULL, 10);
			r = msm_proc_comm (PCOM_CUSTOMER_CMD3, &id, &val);
			D("%s(0x%08x, %u) rc=%d\n", misc_helper_cmd_name[CMD_NVWRITE], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%d", r);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

#if 0
	 case CMD_NVR :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = (unsigned)simple_strtoul (args[1], NULL, 10);
		val = 0; 
		r = msm_proc_comm (PCOM_NV_READ, &id, &val);
		D("%s(%u)=%u rc=%d\n", misc_helper_cmd_name[CMD_NVR], id, val, r);
		sprintf (misc_helper_info.data.buffer, "%u %u", id, val);
			break;

	 case CMD_NVW :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		id = (unsigned)simple_strtoul (args[1], NULL, 10);
		val = (unsigned)simple_strtoul (args[2], NULL, 10);
		r = msm_proc_comm (PCOM_NV_WRITE, &id, &val);
		D("%s(%u, %u) rc=%d\n", misc_helper_cmd_name[CMD_NVW], id, val, r);
		sprintf (misc_helper_info.data.buffer, "%d", r);
			break;
#endif

	 case CMD_GPIO_SET :
		if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = (unsigned)simple_strtoul (args[1], NULL, 10);
			val = (unsigned)simple_strtoul (args[2], NULL, 10);
			r = gpio_set_value (id, val);
			val = gpio_get_value (id);
			D("%s(%u, %u) rc=%d\n", misc_helper_cmd_name[CMD_GPIO_SET], id, val, r);
			sprintf (misc_helper_info.data.buffer, "%u", val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

     case CMD_VREG :
	if (!is_secure_hw()) {
           misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
           {
            unsigned int mv = (unsigned)simple_strtoul (args[2], NULL, 10);
            int rc;
            struct vreg *vreg;
            vreg = vreg_get(0,args[1]);
            if (IS_ERR(vreg)) {
                sprintf(misc_helper_info.data.buffer, "* bad name (%s)", args[1]);
                break;
            }
            if (mv > 0) {
                if ((rc = vreg_set_level(vreg, mv))) {
                    sprintf(misc_helper_info.data.buffer, "* failed to set level (%d mV) [%d]", mv, rc);
                    break;
                }
                if ((rc = vreg_enable(vreg))) {
                    sprintf(misc_helper_info.data.buffer, "* failed to enable [%d]", rc);
                    break;
                }
            }
            else {
                if ((rc = vreg_disable(vreg))) {
                    sprintf(misc_helper_info.data.buffer, "* failed to disable [%d]", rc);
                    break;
                }
            }
            D("%s(%s,%d) rc=%d\n", misc_helper_cmd_name[CMD_VREG],args[1],mv,rc);
           }
           sprintf(misc_helper_info.data.buffer, "OK");
	} else
		sprintf (misc_helper_info.data.buffer, "Permissions denied");
        		break;

     case CMD_CLK :
	if (!is_secure_hw()) {
	   misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
	   {
            unsigned int rate = (unsigned)simple_strtoul (args[2], NULL, 10);
            int rc = 0;
            struct clk *clk;
            clk = clk_get(NULL, args[1]);
            if (IS_ERR(clk)) {
                sprintf(misc_helper_info.data.buffer, "* bad name (%s)", args[1]);
                break;
            }
            if (rate > 0) {
                if ((rc = clk_enable(clk))) {
                    sprintf(misc_helper_info.data.buffer, "* failed to enable clk [%d]", rc);
                    break;
                }
                if ((rc = clk_set_rate(clk,rate))) {
                    sprintf(misc_helper_info.data.buffer, "* failed to set rate [%d]", rc);
                    break;
                }
            }
            else {
                clk_disable(clk);
            }
            D("%s(%s,%d) rc=%d\n", misc_helper_cmd_name[CMD_CLK],args[1],rate,rc);
           }
           sprintf(misc_helper_info.data.buffer, "OK");
	} else
		sprintf (misc_helper_info.data.buffer, "Permissions denied");
        		break;

	 case CMD_FB_SET :
		if (!is_secure_hw()) {
			misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
			id = (1 << 8) | PROCCOMM_FACTORY_BYTE;
			val = (unsigned)simple_strtoul (args[1], NULL, 10);
			r = msm_proc_comm (PCOM_CUSTOMER_CMD3, &id, &val);
			D("%s(%u)=%u\n", misc_helper_cmd_name[CMD_FB_SET], id, val);
			sprintf (misc_helper_info.data.buffer, "%u", val);
		} else
			sprintf (misc_helper_info.data.buffer, "Permissions denied");
			break;

	 case CMD_TRUSTED_BOOT :
		if (! vendor1) {
			vendor1 = smem_alloc(SMEM_ID_VENDOR1, sizeof(smem_mot_vendor1_type));
		}
		if (vendor1) {
			r = 0;
			val = (unsigned int)vendor1->trusted_boot;

		} else {
			r = -1;
			val = -1;
		}
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		D("%s(is_boot_trusted)=%u, rc=%d\n", misc_helper_cmd_name[CMD_TRUSTED_BOOT], val, r);
		sprintf (misc_helper_info.data.buffer, "%u", val);
			break;


	 case CMD_SECURE :
		r = 0;
		val = (unsigned int)is_secure_hw();
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		D("%s(is_security_on)=%u, rc=%d\n", misc_helper_cmd_name[CMD_SECURE], val, r);
		sprintf (misc_helper_info.data.buffer, "%u", val);
			break;


	 case CMD_FB_GET :
		if (! vendor1) {
			vendor1 = smem_alloc(SMEM_ID_VENDOR1, sizeof(smem_mot_vendor1_type));
		}
		if (vendor1) {
			r = 0;
			val = (unsigned int)vendor1->fact_byte;

		} else {
			r = -1;
			val = -1;
		}
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		D("%s(10024)=%u, rc=%d\n", misc_helper_cmd_name[CMD_FB_GET], val, r);
		sprintf (misc_helper_info.data.buffer, "%u", val);
			break;

	 case CMD_MAX :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF;
		sprintf (misc_helper_info.data.buffer, "missing parameters for command: %s", args[0]);
			break;
	 default :
		misc_helper_info.response |= MH_RESP_REQUIRED | MH_RESP_READY | MH_RESP_FMT_BUFF; 
		sprintf (misc_helper_info.data.buffer, "command: %s is not supported", cmd);
	}
	return count;
}
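
A user-space sketch of driving this write() interface (not from the original source): the device node "/dev/misc_helper" and the command string "battery" are assumptions for illustration, since the node registration, the command-name table and any read() handler are not shown above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char cmd[] = "battery\n";	/* hypothetical command spelling */
	char resp[128];
	ssize_t n;
	int fd = open("/dev/misc_helper", O_RDWR);	/* hypothetical node */

	if (fd < 0)
		return 1;

	/* One write() per command line, as misc_helper_write() expects. */
	if (write(fd, cmd, strlen(cmd)) < 0) {
		close(fd);
		return 1;
	}

	/* Assumes a matching read() handler returns misc_helper_info.data.buffer. */
	n = read(fd, resp, sizeof(resp) - 1);
	if (n > 0) {
		resp[n] = '\0';
		printf("%s\n", resp);
	}

	close(fd);
	return 0;
}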
Example #15
	/* Time Slave State Bits */
	#define SLAVE_TIME_REQUEST         0x0400
	#define SLAVE_TIME_POLL            0x0800
	#define SLAVE_TIME_INIT            0x1000

	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, sizeof(uint32_t));
	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	state = smsm_get_state(SMSM_MODEM_STATE);
	if ((state & SMSM_INIT) == 0) {
		printk(KERN_ERR "smsm not initialized\n");
		return 0;
	}

	time_start(data);
	while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 1 still "
				"invalid state %x\n", state);
			return 0;
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
		SLAVE_TIME_REQUEST);

	time_start(data);
	while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING)) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 2 still "
				"invalid state %x\n", state);
			smem_clock_val = 0;
			goto sync_sclk_exit;
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);

	time_start(data);
	do {
		smem_clock_val = *smem_clock;
	} while (smem_clock_val == 0 && !time_expired(data));

	state = smsm_get_state(SMSM_TIME_MASTER_DEM);

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO
				"get_smem_clock: state %x clock %u\n",
				state, smem_clock_val);
	} else {
		printk(KERN_INFO "get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

sync_sclk_exit:
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
		SLAVE_TIME_INIT);
	return smem_clock_val;
}
#else /* CONFIG_MSM_N_WAY_SMSM */
static uint32_t msm_timer_sync_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *data, uint32_t clk_val),
	struct msm_timer_sync_data_t *data)
{
	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t last_state;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE,
				sizeof(uint32_t));

	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	last_state = state = smsm_get_state(SMSM_MODEM_STATE);
	smem_clock_val = *smem_clock;
	if (smem_clock_val) {
		printk(KERN_INFO "get_smem_clock: invalid start state %x "
			"clock %u\n", state, smem_clock_val);
		smsm_change_state(SMSM_APPS_STATE,
				  SMSM_TIMEWAIT, SMSM_TIMEINIT);

		time_start(data);
		while (*smem_clock != 0 && !time_expired(data))
			;

		smem_clock_val = *smem_clock;
		if (smem_clock_val) {
			printk(KERN_INFO "get_smem_clock: timeout still "
				"invalid state %x clock %u\n",
				state, smem_clock_val);
			return 0;
		}
	}

	time_start(data);
	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEINIT, SMSM_TIMEWAIT);
	do {
		smem_clock_val = *smem_clock;
		state = smsm_get_state(SMSM_MODEM_STATE);
		if (state != last_state) {
			last_state = state;
			if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
				printk(KERN_INFO
					"get_smem_clock: state %x clock %u\n",
					state, smem_clock_val);
		}
	} while (smem_clock_val == 0 && !time_expired(data));

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);
	} else {
		printk(KERN_INFO "get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

	smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEWAIT, SMSM_TIMEINIT);
	time_start(data);
	while (*smem_clock != 0 && !time_expired(data))
		;

	if (*smem_clock)
		printk(KERN_INFO "get_smem_clock: exit timeout state %x "
			"clock %u\n", state, *smem_clock);
	return smem_clock_val;
}
Example #16
static uint32_t msm_timer_sync_sclk(
	void (*time_start)(struct msm_timer_sync_data_t *data),
	bool (*time_expired)(struct msm_timer_sync_data_t *data),
	void (*update)(struct msm_timer_sync_data_t *data, uint32_t clk_val),
	struct msm_timer_sync_data_t *data)
{
	/* Time Master State Bits */
	#define MASTER_BITS_PER_CPU        1
	#define MASTER_TIME_PENDING \
		(0x01UL << (MASTER_BITS_PER_CPU * SMSM_APPS_STATE))

	/* Time Slave State Bits */
	#define SLAVE_TIME_REQUEST         0x0400
	#define SLAVE_TIME_POLL            0x0800
	#define SLAVE_TIME_INIT            0x1000

	uint32_t *smem_clock;
	uint32_t smem_clock_val;
	uint32_t state;

	smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, sizeof(uint32_t));
	if (smem_clock == NULL) {
		printk(KERN_ERR "no smem clock\n");
		return 0;
	}

	state = smsm_get_state(SMSM_MODEM_STATE);
	if ((state & SMSM_INIT) == 0) {
		printk(KERN_ERR "smsm not initialized\n");
		return 0;
	}

	time_start(data);
	while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 1 still "
				"invalid state %x\n", state);
			return 0;
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
		SLAVE_TIME_REQUEST);

	time_start(data);
	while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
		MASTER_TIME_PENDING)) {
		if (time_expired(data)) {
			printk(KERN_INFO "get_smem_clock: timeout 2 still "
				"invalid state %x\n", state);
			smem_clock_val = 0;
			goto sync_sclk_exit;
		}
	}

	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);

	time_start(data);
	do {
		smem_clock_val = *smem_clock;
	} while (smem_clock_val == 0 && !time_expired(data));

	state = smsm_get_state(SMSM_TIME_MASTER_DEM);

	if (smem_clock_val) {
		if (update != NULL)
			update(data, smem_clock_val);

		if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
			printk(KERN_INFO
				"get_smem_clock: state %x clock %u\n",
				state, smem_clock_val);
	} else {
		printk(KERN_INFO "get_smem_clock: timeout state %x clock %u\n",
			state, smem_clock_val);
	}

sync_sclk_exit:
	smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
		SLAVE_TIME_INIT);
	return smem_clock_val;
}
Example #17
static int msp430_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	int i;
	int ret = 0;
	char rec_buf = 0;
	struct msp430_data *data;
	struct _batt_func batt_func = {0};
	acer_smem_flag_t *acer_smem_flag;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		pr_err("%s: i2c_check_functionality failed!\n", __func__);
		ret = -EIO;
		goto exit_i2c_check_failed;
	}

	data = kzalloc(sizeof(struct msp430_data), GFP_KERNEL);
	if (data == NULL) {
		pr_err("%s: no memory for driver data\n", __func__);
		ret = -ENOMEM;
		goto exit_kzalloc_failed;
	}
	i2c_set_clientdata(client, data);
	data->client = client;
	data->is_mcu_ready = 0;
	data->led_buf[0] = 0;
	data->led_buf[1] = 0;
	data->led_buf[2] = 0;
	data->batt_info.cap_percent = 255;
	data->is_first_read_cap_after_resume = 1;
	priv_data = data;

	acer_smem_flag = (acer_smem_flag_t *)(smem_alloc(SMEM_ID_VENDOR0,
		sizeof(acer_smem_flag_t)));
	if (acer_smem_flag == NULL) {
		pr_err("%s:alloc acer_smem_flag failed!\n", __func__);
		data->hw_ver = ACER_HW_VERSION_INVALID;
	} else
		data->hw_ver = acer_smem_flag->acer_hw_version;

	mutex_init(&data->gauge_mutex);
	mutex_init(&data->mutex);
	mutex_init(&data->i2c_mutex);
	INIT_WORK(&data->work, msp430_work_func);
	init_waitqueue_head(&data->gauge_wait);
	wake_lock_init(&data->cap_zero_wlock, WAKE_LOCK_SUSPEND, "batt_zero(full)_lock");
	wake_lock_init(&data->mcu_update_wlock, WAKE_LOCK_SUSPEND, "mcu_update_lock");

	setup_timer(&data->avg_timer, avg_timer_expired, 0);
	INIT_WORK(&data->avg_work, avg_timer_work);

	/* connect with battery driver */
	batt_func.get_battery_info = _get_battery_info;
	register_bat_func(&batt_func);
	if (data && batt_func.battery_isr_hander)
		data->battery_isr_hander = batt_func.battery_isr_hander;
	else
		pr_err("%s:register battery function(battery_isr_hander) error!\n", __func__);

	if (data && batt_func.get_charger_type)
		data->get_charger_type = batt_func.get_charger_type;
	else
		pr_err("%s:register battery function(get_charger_type) error!\n", __func__);


	/* Input register */
	data->input = input_allocate_device();
	if (!data->input) {
		pr_err("%s: input_allocate_device failed!\n", __func__);
		ret = -ENOMEM;
		goto exit_input_allocate_failed;
	}

	data->input->name = "a5-msp430-keypad";
	data->keymap_size = ARRAY_SIZE(msp430_keymap);
	for (i = 1; i < data->keymap_size; i++)
		input_set_capability(data->input, EV_KEY, msp430_keymap[i]);

	ret = input_register_device(data->input);
	if (ret) {
		pr_err("%s input_register_device failed!\n", __func__);
		goto exit_input_register_failed;
	}

	/*  Link interrupt routine with the irq */
	if (client->irq) {
		ret = request_irq(client->irq, msp430_interrupt,
				IRQF_TRIGGER_RISING, MSP430_DRIVER_NAME, data);
		if (ret < 0) {
			pr_err("%s: request_irq failed!\n", __func__);
			goto exit_irq_request_failed;
		} else
			enable_irq_wake(client->irq);
	}

	/* Read MCU Version */
	ret = reg_read(client, REG_SYSTEM_VERSION, &rec_buf);
	if (ret < 0) {
		ret = -EIO;
		goto exit_read_failed;
	}
	pr_info("MCU msp430 Version = %d\n", rec_buf);
	data->version = rec_buf;

	if (data->version > 0x18)
		data->led_buf[0] = SYS_MODE_RESERVED;
	else
		data->led_buf[0] = SYS_MODE_NORMAL;

	if (data->hw_ver != ACER_HW_VERSION_INVALID)
		update_mcu_delayed(14000);

	/*version=0xff means firmware is broken,
	 *it should be fixed by update_mcu_delayed*/
	if (data->version != 0xff) {
		mcu_hw_config(client, data->version);
		data->is_mcu_ready = 1;
		battery_info_change();
		mod_timer(&data->avg_timer, jiffies + msecs_to_jiffies(POLLING_TIME - 5000));
	}

#ifdef CONFIG_HAS_EARLYSUSPEND
	data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
	data->early_suspend.suspend = msp430_early_suspend;
	data->early_suspend.resume = msp430_late_resume;
	register_early_suspend(&data->early_suspend);
#endif

	return 0;

exit_read_failed:
	free_irq(client->irq, data);

exit_irq_request_failed:
	input_unregister_device(data->input);

exit_input_register_failed:
	input_free_device(data->input);

exit_input_allocate_failed:
	kfree(data);

exit_kzalloc_failed:
exit_i2c_check_failed:

	return ret;
}
Example #18
/*----------------------------------------------------------
 *	pipe_ioctl
 *---------------------------------------------------------*/
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{

#if 1 // FUJITA
	pipe_ioctl_cmd_union __user *argp = (pipe_ioctl_cmd_union __user *)arg;
	pipe_ioctl_cmd_union param;
	unsigned int pipe_smem_id;
	unsigned char *smem_ptr = NULL;
	long ret;
	int i;

	if (copy_from_user(&param, argp,sizeof(pipe_ioctl_cmd_union))) {
		printk(KERN_ERR "pipe_ioctl:%s: copy_from_user failed 1\n", __func__ );
		return (-EFAULT);
	}
	switch(cmd) {
		case PIPE_IOCTL_00:
			if (param.pipe_cmd0_union.pipe_user_id < SMEM_ID_TBL_MAX){
				pipe_smem_id = _smem_id_tbl[param.pipe_cmd0_union.pipe_user_id];

			}else{
				pipe_smem_id = 0xFFFF;
				printk(KERN_ERR "PIPE_IOCTL_CMD_00: user id invalid. uid=%d\n", param.pipe_cmd0_union.pipe_user_id);
				return (-EFAULT);
			}
			if (copy_to_user(param.pipe_cmd0_union.ppipe_smem_id, &pipe_smem_id, sizeof(pipe_smem_id))) {
				printk(KERN_ERR "PIPE_IOCTL_CMD_00: copy_to_user failed. uid=%d\n",param.pipe_cmd0_union.pipe_user_id);
				return (-EFAULT);
			}
			break;
		case PIPE_IOCTL_01:
			ret = pipe_size_check(param.pipe_cmd1_union);
			if(ret)
				return ret;
			if(copy_from_user(pipe_local_buf, param.pipe_cmd1_union.pipe_user_buf, param.pipe_cmd1_union.pipe_user_size)) {
				printk(KERN_ERR "PIPE_IOCTL_CMD_01: copy_from_user failed.\n");
				return (-EFAULT);
			}
			/* SMEM access*/
#if 1 // FUJITA
			smem_ptr = (unsigned char *)smem_alloc_vendor1(param.pipe_cmd1_union.pipe_smem_id);
#else
			smem_ptr = (unsigned char *)smem_alloc(param.pipe_cmd1_union.pipe_smem_id, param.pipe_cmd1_union.pipe_smem_size); 
#endif
			if(smem_ptr == NULL){
				printk(KERN_ERR "PIPE_IOCTL_CMD_01: failed to get smem_id=%x\n", param.pipe_cmd1_union.pipe_smem_id);
				return (-EIO);
			}

			/* data store */
			smem_ptr += param.pipe_cmd1_union.pipe_smem_offset;
			for(i = 0; i < param.pipe_cmd1_union.pipe_user_size; i++){
				*smem_ptr++ = pipe_local_buf[i];
			}
			break;
		case PIPE_IOCTL_02:
			ret = pipe_size_check(param.pipe_cmd1_union);
			if(ret)
				return ret;
			/* SMEM access*/
#if 1 // FUJITA
			smem_ptr = (unsigned char *)smem_alloc_vendor1(param.pipe_cmd1_union.pipe_smem_id);
#else
			smem_ptr = (unsigned char *)smem_alloc(param.pipe_cmd1_union.pipe_smem_id, param.pipe_cmd1_union.pipe_smem_size); 
#endif
			if(smem_ptr == NULL){
				printk(KERN_ERR "PIPE_IOCTL_CMD_02: failed to get pipe_smem_id=%x\n", param.pipe_cmd1_union.pipe_smem_id);
				return (-EIO);
			}

			/* data store */
			smem_ptr += param.pipe_cmd1_union.pipe_smem_offset;
			if(copy_to_user(param.pipe_cmd1_union.pipe_user_buf, smem_ptr, param.pipe_cmd1_union.pipe_user_size)) {
				printk(KERN_ERR "PIPE_IOCTL_CMD_02: copy_to_user failed.\n");
				return (-EFAULT);
			}
			break;
		default:
			printk(KERN_ERR "pipe_ioctl:illegal command. %d\n",cmd);
			return (-EINVAL);
	}
#endif
	return 0;
}
void msm_gpio_enter_sleep(int from_idle)
{
	int i;
	struct tramp_gpio_smem *smem_gpio;

	smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));

	if (smem_gpio) {
		for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
			smem_gpio->enabled[i] = 0;
			smem_gpio->detection[i] = 0;
			smem_gpio->polarity[i] = 0;
		}
	}

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		unsigned int int_en = msm_gpio_chips[i].int_enable[!from_idle] & ~msm_gpio_chips[i].int_enable_mask[!from_idle];
		__raw_writel(int_en, msm_gpio_chips[i].regs.int_en);
		if ((msm_gpio_debug_mask & GPIO_DEBUG_SLEEP) && !from_idle)
			printk(KERN_INFO "gpio[%3d,%3d]: int_enable=0x%08x int_mask_en=0x%08x int_edge=0x%8p int_pos=0x%8p\n",
				msm_gpio_chips[i].chip.base,
				msm_gpio_chips[i].chip.base + msm_gpio_chips[i].chip.ngpio - 1,
				msm_gpio_chips[i].int_enable[!from_idle],
				msm_gpio_chips[i].int_enable_mask[!from_idle],
				msm_gpio_chips[i].regs.int_edge,
				msm_gpio_chips[i].regs.int_pos);
		if (smem_gpio) {
			uint32_t tmp;
			int start, index, shiftl, shiftr;
			start = msm_gpio_chips[i].chip.base;
			index = start / 32;
			shiftl = start % 32;
			shiftr = 32 - shiftl;
			tmp = msm_gpio_chips[i].int_enable[!from_idle];
			smem_gpio->enabled[index] |= tmp << shiftl;
			smem_gpio->enabled[index+1] |= tmp >> shiftr;
			smem_gpio->detection[index] |=
				__raw_readl(msm_gpio_chips[i].regs.int_edge) <<
				shiftl;
			smem_gpio->detection[index+1] |=
				__raw_readl(msm_gpio_chips[i].regs.int_edge) >>
				shiftr;
			smem_gpio->polarity[index] |=
				__raw_readl(msm_gpio_chips[i].regs.int_pos) <<
				shiftl;
			smem_gpio->polarity[index+1] |=
				__raw_readl(msm_gpio_chips[i].regs.int_pos) >>
				shiftr;
		}
	}
	mb();

	if (smem_gpio) {
		if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP)
			for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
				printk("msm_gpio_enter_sleep gpio %d-%d: enable"
				       " %08x, edge %08x, polarity %08x\n",
				       i * 32, i * 32 + 31,
				       smem_gpio->enabled[i],
				       smem_gpio->detection[i],
				       smem_gpio->polarity[i]);
			}
		for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
			smem_gpio->num_fired[i] = 0;
	}
}
Example #20
static int sblock_thread(void *data)
{
	struct sblock_mgr *sblock = data;
	struct smsg mcmd, mrecv;
	int rval;
	int recovery = 0;
	struct sched_param param = {.sched_priority = 90};

	/*set the thread as a real time thread, and its priority is 90*/
	sched_setscheduler(current, SCHED_RR, &param);

	/* since the channel open may hang, we call it in the sblock thread */
	rval = smsg_ch_open(sblock->dst, sblock->channel, -1);
	if (rval != 0) {
		printk(KERN_ERR "Failed to open channel %d\n", sblock->channel);
		/* assign NULL to the thread pointer since the channel failed to open */
		sblock->thread = NULL;
		return rval;
	}

	/* handle the sblock events */
	while (!kthread_should_stop()) {

		/* monitor sblock recv smsg */
		smsg_set(&mrecv, sblock->channel, 0, 0, 0);
		rval = smsg_recv(sblock->dst, &mrecv, -1);
		if (rval == -EIO || rval == -ENODEV) {
		/* channel state is FREE */
			msleep(5);
			continue;
		}

		pr_debug("sblock thread recv msg: dst=%d, channel=%d, "
				"type=%d, flag=0x%04x, value=0x%08x\n",
				sblock->dst, sblock->channel,
				mrecv.type, mrecv.flag, mrecv.value);

		switch (mrecv.type) {
		case SMSG_TYPE_OPEN:
			/* handle channel recovery */
			if (recovery) {
				if (sblock->handler) {
					sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
				}
				sblock_recover(sblock->dst, sblock->channel);
			}
			smsg_open_ack(sblock->dst, sblock->channel);
			break;
		case SMSG_TYPE_CLOSE:
			/* handle channel recovery */
			smsg_close_ack(sblock->dst, sblock->channel);
			if (sblock->handler) {
				sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
			}
			sblock->state = SBLOCK_STATE_IDLE;
			break;
		case SMSG_TYPE_CMD:
			/* respond cmd done for sblock init */
			WARN_ON(mrecv.flag != SMSG_CMD_SBLOCK_INIT);
			smsg_set(&mcmd, sblock->channel, SMSG_TYPE_DONE,
					SMSG_DONE_SBLOCK_INIT, sblock->smem_addr);
			smsg_send(sblock->dst, &mcmd, -1);
			if (sblock->handler) {
				sblock->handler(SBLOCK_NOTIFY_OPEN, sblock->data);
			}
			sblock->state = SBLOCK_STATE_READY;
			recovery = 1;
			break;
		case SMSG_TYPE_EVENT:
			/* handle sblock send/release events */
			switch (mrecv.flag) {
			case SMSG_EVENT_SBLOCK_SEND:
				wake_up_interruptible_all(&sblock->ring->recvwait);
				if (sblock->handler) {
					sblock->handler(SBLOCK_NOTIFY_RECV, sblock->data);
				}
				break;
			case SMSG_EVENT_SBLOCK_RELEASE:
				wake_up_interruptible_all(&(sblock->ring->getwait));
				if (sblock->handler) {
					sblock->handler(SBLOCK_NOTIFY_GET, sblock->data);
				}
				break;
			default:
				rval = 1;
				break;
			}
			break;
		default:
			rval = 1;
			break;
		};
		if (rval) {
			printk(KERN_WARNING "non-handled sblock msg: %d-%d, %d, %d, %d\n",
					sblock->dst, sblock->channel,
					mrecv.type, mrecv.flag, mrecv.value);
			rval = 0;
		}
	}

	printk(KERN_WARNING "sblock %d-%d thread stop", sblock->dst, sblock->channel);
	return rval;
}

int sblock_create(uint8_t dst, uint8_t channel,
		uint32_t txblocknum, uint32_t txblocksize,
		uint32_t rxblocknum, uint32_t rxblocksize)
{
	struct sblock_mgr *sblock = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	uint32_t hsize;
	int i, result;

	sblock = kzalloc(sizeof(struct sblock_mgr) , GFP_KERNEL);
	if (!sblock) {
		return -ENOMEM;
	}

	sblock->state = SBLOCK_STATE_IDLE;
	sblock->dst = dst;
	sblock->channel = channel;
	sblock->txblksz = txblocksize;
	sblock->rxblksz = rxblocksize;
	sblock->txblknum = txblocknum;
	sblock->rxblknum = rxblocknum;


	/* allocate smem */
	hsize = sizeof(struct sblock_header);
	sblock->smem_size = hsize +						/* for header*/
		txblocknum * txblocksize + rxblocknum * rxblocksize + 		/* for blks */
		(txblocknum + rxblocknum) * sizeof(struct sblock_blks) + 	/* for ring*/
		(txblocknum + rxblocknum) * sizeof(struct sblock_blks); 	/* for pool*/

	sblock->smem_addr = smem_alloc(sblock->smem_size);
	if (!sblock->smem_addr) {
		printk(KERN_ERR "Failed to allocate smem for sblock\n");
		kfree(sblock);
		return -ENOMEM;
	}
	sblock->smem_virt = ioremap(sblock->smem_addr, sblock->smem_size);
	if (!sblock->smem_virt) {
		printk(KERN_ERR "Failed to map smem for sblock\n");
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock);
		return -EFAULT;
	}

	/* initialize ring and header */
	sblock->ring = kzalloc(sizeof(struct sblock_ring), GFP_KERNEL);
	if (!sblock->ring) {
		printk(KERN_ERR "Failed to allocate ring for sblock\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock);
		return -ENOMEM;
	}
	ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
	ringhd->txblk_addr = sblock->smem_addr + hsize;
	ringhd->txblk_count = txblocknum;
	ringhd->txblk_size = txblocksize;
	ringhd->txblk_rdptr = 0;
	ringhd->txblk_wrptr = 0;
	ringhd->txblk_blks = sblock->smem_addr + hsize +
		txblocknum * txblocksize + rxblocknum * rxblocksize;
	ringhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
	ringhd->rxblk_count = rxblocknum;
	ringhd->rxblk_size = rxblocksize;
	ringhd->rxblk_rdptr = 0;
	ringhd->rxblk_wrptr = 0;
	ringhd->rxblk_blks = ringhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);

	poolhd = (volatile struct sblock_ring_header *)(sblock->smem_virt + sizeof(struct sblock_ring_header));
	poolhd->txblk_addr = sblock->smem_addr + hsize;
	poolhd->txblk_count = txblocknum;
	poolhd->txblk_size = txblocksize;
	poolhd->txblk_rdptr = 0;
	poolhd->txblk_wrptr = 0;
	poolhd->txblk_blks = ringhd->rxblk_blks + rxblocknum * sizeof(struct sblock_blks);
	poolhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
	poolhd->rxblk_count = rxblocknum;
	poolhd->rxblk_size = rxblocksize;
	poolhd->rxblk_rdptr = 0;
	poolhd->rxblk_wrptr = 0;
	poolhd->rxblk_blks = poolhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);

	sblock->ring->txrecord = kzalloc(sizeof(int) * txblocknum, GFP_KERNEL);
	if (!sblock->ring->txrecord) {
		printk(KERN_ERR "Failed to allocate memory for txrecord\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock->ring);
		kfree(sblock);
		return -ENOMEM;
	}

	sblock->ring->rxrecord = kzalloc(sizeof(int) * rxblocknum, GFP_KERNEL);
	if (!sblock->ring->rxrecord) {
		printk(KERN_ERR "Failed to allocate memory for rxrecord\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock->ring->txrecord);
		kfree(sblock->ring);
		kfree(sblock);
		return -ENOMEM;
	}

	sblock->ring->header = sblock->smem_virt;
	sblock->ring->txblk_virt = sblock->smem_virt +
		(ringhd->txblk_addr - sblock->smem_addr);
	sblock->ring->r_txblks = sblock->smem_virt +
		(ringhd->txblk_blks - sblock->smem_addr);
	sblock->ring->rxblk_virt = sblock->smem_virt +
		(ringhd->rxblk_addr - sblock->smem_addr);
	sblock->ring->r_rxblks = sblock->smem_virt +
		(ringhd->rxblk_blks - sblock->smem_addr);
	sblock->ring->p_txblks = sblock->smem_virt +
		(poolhd->txblk_blks - sblock->smem_addr);
	sblock->ring->p_rxblks = sblock->smem_virt +
		(poolhd->rxblk_blks - sblock->smem_addr);


	for (i = 0; i < txblocknum; i++) {
		sblock->ring->p_txblks[i].addr = poolhd->txblk_addr + i * txblocksize;
		sblock->ring->p_txblks[i].length = txblocksize;
		sblock->ring->txrecord[i] = SBLOCK_BLK_STATE_DONE;
		poolhd->txblk_wrptr++;
	}
	for (i = 0; i < rxblocknum; i++) {
		sblock->ring->p_rxblks[i].addr = poolhd->rxblk_addr + i * rxblocksize;
		sblock->ring->p_rxblks[i].length = rxblocksize;
		sblock->ring->rxrecord[i] = SBLOCK_BLK_STATE_DONE;
		poolhd->rxblk_wrptr++;
	}

	init_waitqueue_head(&sblock->ring->getwait);
	init_waitqueue_head(&sblock->ring->recvwait);
	spin_lock_init(&sblock->ring->r_txlock);
	spin_lock_init(&sblock->ring->r_rxlock);
	spin_lock_init(&sblock->ring->p_txlock);
	spin_lock_init(&sblock->ring->p_rxlock);

	sblock->thread = kthread_create(sblock_thread, sblock,
			"sblock-%d-%d", dst, channel);
	if (IS_ERR(sblock->thread)) {
		printk(KERN_ERR "Failed to create kthread: sblock-%d-%d\n", dst, channel);
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock->ring->txrecord);
		kfree(sblock->ring->rxrecord);
		kfree(sblock->ring);
		result = PTR_ERR(sblock->thread);
		kfree(sblock);
		return result;
	}

	sblocks[dst][channel]=sblock;
	wake_up_process(sblock->thread);

	return 0;
}

void sblock_destroy(uint8_t dst, uint8_t channel)
{
	struct sblock_mgr *sblock = sblocks[dst][channel];

	if (sblock == NULL) {
		return;
	}

	sblock->state = SBLOCK_STATE_IDLE;
	smsg_ch_close(dst, channel, -1);

	/* stop sblock thread if it's created successfully and still alive */
	if (!IS_ERR_OR_NULL(sblock->thread)) {
		kthread_stop(sblock->thread);
	}

	if (sblock->ring) {
		wake_up_interruptible_all(&sblock->ring->recvwait);
		wake_up_interruptible_all(&sblock->ring->getwait);
		if (sblock->ring->txrecord) {
			kfree(sblock->ring->txrecord);
		}
		if (sblock->ring->rxrecord) {
			kfree(sblock->ring->rxrecord);
		}
		kfree(sblock->ring);
	}
	if (sblock->smem_virt) {
		iounmap(sblock->smem_virt);
	}
	smem_free(sblock->smem_addr, sblock->smem_size);
	kfree(sblock);

	sblocks[dst][channel]=NULL;
}

int sblock_register_notifier(uint8_t dst, uint8_t channel,
		void (*handler)(int event, void *data), void *data)
{
	struct sblock_mgr *sblock = sblocks[dst][channel];

	if (!sblock) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return -ENODEV;
	}
#ifndef CONFIG_SIPC_WCN
	if (sblock->handler) {
		printk(KERN_ERR "sblock handler already registered\n");
		return -EBUSY;
	}
#endif
	sblock->handler = handler;
	sblock->data = data;

	return 0;
}

int sblock_get(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	int txpos, index;
	int rval = 0;
	unsigned long flags;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	}

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

	if (poolhd->txblk_rdptr == poolhd->txblk_wrptr) {
		if (timeout == 0) {
			/* no wait */
			printk(KERN_WARNING "sblock_get %d-%d is empty!\n",
				dst, channel);
			rval = -ENODATA;
		} else if (timeout < 0) {
			/* wait forever */
			rval = wait_event_interruptible(ring->getwait,
					poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
					sblock->state == SBLOCK_STATE_IDLE);
			if (rval < 0) {
				printk(KERN_WARNING "sblock_get wait interrupted!\n");
			}

			if (sblock->state == SBLOCK_STATE_IDLE) {
				printk(KERN_ERR "sblock_get sblock state is idle!\n");
				rval = -EIO;
			}
		} else {
			/* wait timeout */
			rval = wait_event_interruptible_timeout(ring->getwait,
					poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
					sblock->state == SBLOCK_STATE_IDLE,
					timeout);
			if (rval < 0) {
				printk(KERN_WARNING "sblock_get wait interrupted!\n");
			} else if (rval == 0) {
				printk(KERN_WARNING "sblock_get wait timeout!\n");
				rval = -ETIME;
			}

			if(sblock->state == SBLOCK_STATE_IDLE) {
				printk(KERN_ERR "sblock_get sblock state is idle!\n");
				rval = -EIO;
			}
		}
	}

	if (rval < 0) {
		return rval;
	}

	/* multi-gotter may cause got failure */
	spin_lock_irqsave(&ring->p_txlock, flags);
	if (poolhd->txblk_rdptr != poolhd->txblk_wrptr &&
			sblock->state == SBLOCK_STATE_READY) {
		txpos = sblock_get_ringpos(poolhd->txblk_rdptr, poolhd->txblk_count);
		blk->addr = sblock->smem_virt + (ring->p_txblks[txpos].addr - sblock->smem_addr);
		blk->length = poolhd->txblk_size;
		poolhd->txblk_rdptr = poolhd->txblk_rdptr + 1;
		index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
		ring->txrecord[index] = SBLOCK_BLK_STATE_PENDING;
	} else {
		rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
	}
	spin_unlock_irqrestore(&ring->p_txlock, flags);

	return rval;
}

static int sblock_send_ex(uint8_t dst, uint8_t channel, struct sblock *blk, bool yell)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	struct smsg mevt;
	int txpos, index;
	int rval = 0;
	unsigned long flags;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	}

	pr_debug("sblock_send: dst=%d, channel=%d, addr=%p, len=%d\n",
			dst, channel, blk->addr, blk->length);

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

	spin_lock_irqsave(&ring->r_txlock, flags);

	txpos = sblock_get_ringpos(ringhd->txblk_wrptr, ringhd->txblk_count);
	ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->r_txblks[txpos].length = blk->length;
	pr_debug("sblock_send: channel=%d, wrptr=%d, txpos=%d, addr=%x\n",
			channel, ringhd->txblk_wrptr, txpos, ring->r_txblks[txpos].addr);
	ringhd->txblk_wrptr = ringhd->txblk_wrptr + 1;
	if (yell && sblock->state == SBLOCK_STATE_READY) {
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
		rval = smsg_send(dst, &mevt, 0);
	}
	index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
	ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;

	spin_unlock_irqrestore(&ring->r_txlock, flags);

	return rval;
}

int sblock_send(uint8_t dst, uint8_t channel, struct sblock *blk)
{
	return sblock_send_ex(dst, channel, blk, true);
}

int sblock_send_prepare(uint8_t dst, uint8_t channel, struct sblock *blk)
{
	return sblock_send_ex(dst, channel, blk, false);
}

int sblock_send_finish(uint8_t dst, uint8_t channel)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	struct smsg mevt;
	int rval = 0;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	}

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

	if (ringhd->txblk_wrptr != ringhd->txblk_rdptr) {
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
		rval = smsg_send(dst, &mevt, 0);
	}

	return rval;
}
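
/*
 * A minimal TX-path sketch (not part of the driver above): reserve a free
 * block with sblock_get(), fill it, and queue it with sblock_send(). It
 * assumes the channel was already created elsewhere (e.g. via sblock_create())
 * and is in SBLOCK_STATE_READY; error handling is trimmed for brevity.
 */
static int sblock_tx_example(uint8_t dst, uint8_t channel,
			     const void *data, int len)
{
	struct sblock blk;
	int rval;

	/* wait indefinitely (timeout < 0) for a free TX block */
	rval = sblock_get(dst, channel, &blk, -1);
	if (rval < 0)
		return rval;

	if (len > blk.length)
		len = blk.length;
	memcpy(blk.addr, data, len);
	blk.length = len;

	/* publish the block and signal the peer with an SMSG event */
	return sblock_send(dst, channel, &blk);
}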

int sblock_receive(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
{
	struct sblock_mgr *sblock = sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	int rxpos, index, rval = 0;
	unsigned long flags;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	}

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

	pr_debug("sblock_receive: dst=%d, channel=%d, timeout=%d\n",
			dst, channel, timeout);
	pr_debug("sblock_receive: channel=%d, wrptr=%d, rdptr=%d",
			channel, ringhd->rxblk_wrptr, ringhd->rxblk_rdptr);

	if (ringhd->rxblk_wrptr == ringhd->rxblk_rdptr) {
		if (timeout == 0) {
			/* no wait */
			pr_debug("sblock_receive %d-%d is empty!\n",
				dst, channel);
			rval = -ENODATA;
		} else if (timeout < 0) {
			/* wait forever */
			rval = wait_event_interruptible(ring->recvwait,
				ringhd->rxblk_wrptr != ringhd->rxblk_rdptr);
			if (rval < 0) {
				printk(KERN_WARNING "sblock_receive wait interrupted!\n");
			}

			if (sblock->state == SBLOCK_STATE_IDLE) {
				printk(KERN_ERR "sblock_receive sblock state is idle!\n");
				rval = -EIO;
			}

		} else {
			/* wait timeout */
			rval = wait_event_interruptible_timeout(ring->recvwait,
				ringhd->rxblk_wrptr != ringhd->rxblk_rdptr, timeout);
			if (rval < 0) {
				printk(KERN_WARNING "sblock_receive wait interrupted!\n");
			} else if (rval == 0) {
				printk(KERN_WARNING "sblock_receive wait timeout!\n");
				rval = -ETIME;
			}

			if (sblock->state == SBLOCK_STATE_IDLE) {
				printk(KERN_ERR "sblock_receive sblock state is idle!\n");
				rval = -EIO;
			}
		}
	}

	if (rval < 0) {
		return rval;
	}

	/* multiple receivers may race here, so re-check the ring under the lock */
	spin_lock_irqsave(&ring->r_rxlock, flags);

	if (ringhd->rxblk_wrptr != ringhd->rxblk_rdptr &&
			sblock->state == SBLOCK_STATE_READY) {
		rxpos = sblock_get_ringpos(ringhd->rxblk_rdptr, ringhd->rxblk_count);
		blk->addr = ring->r_rxblks[rxpos].addr - sblock->smem_addr + sblock->smem_virt;
		blk->length = ring->r_rxblks[rxpos].length;
		ringhd->rxblk_rdptr = ringhd->rxblk_rdptr + 1;
		pr_debug("sblock_receive: channel=%d, rxpos=%d, addr=%p, len=%d\n",
			channel, rxpos, blk->addr, blk->length);
		index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
		ring->rxrecord[index] = SBLOCK_BLK_STATE_PENDING;
	} else {
		rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
	}
	spin_unlock_irqrestore(&ring->r_rxlock, flags);

	return rval;
}

int sblock_get_free_count(uint8_t dst, uint8_t channel)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	int blk_count = 0;
	unsigned long flags;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return -ENODEV;
	}

	ring = sblock->ring;
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

	spin_lock_irqsave(&ring->p_txlock, flags);
	blk_count = (int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr);
	spin_unlock_irqrestore(&ring->p_txlock, flags);

	return blk_count;
}

int sblock_release(uint8_t dst, uint8_t channel, struct sblock *blk)
{
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	struct smsg mevt;
	unsigned long flags;
	int rxpos;
	int index;

	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return -ENODEV;
	}

	pr_debug("sblock_release: dst=%d, channel=%d, addr=%p, len=%d\n",
			dst, channel, blk->addr, blk->length);

	ring = sblock->ring;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

	spin_lock_irqsave(&ring->p_rxlock, flags);
	rxpos = sblock_get_ringpos(poolhd->rxblk_wrptr, poolhd->rxblk_count);
	ring->p_rxblks[rxpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->p_rxblks[rxpos].length = poolhd->rxblk_size;
	poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
	pr_debug("sblock_release: addr=%x\n", ring->p_rxblks[rxpos].addr);

	if ((int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr) == 1 &&
			sblock->state == SBLOCK_STATE_READY) {
		/* send smsg to notify the peer side */
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_RELEASE, 0);
		smsg_send(dst, &mevt, -1);
	}

	index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
	ring->rxrecord[index] = SBLOCK_BLK_STATE_DONE;

	spin_unlock_irqrestore(&ring->p_rxlock, flags);

	return 0;
}
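
/*
 * A matching RX-path sketch (again not part of the driver): take a filled
 * block from the peer with sblock_receive(), consume it, then hand the
 * buffer back to the pool with sblock_release(). The timeout is in jiffies,
 * since it is passed straight to wait_event_interruptible_timeout() above.
 */
static int sblock_rx_example(uint8_t dst, uint8_t channel)
{
	struct sblock blk;
	int rval;

	rval = sblock_receive(dst, channel, &blk, 100);
	if (rval < 0)
		return rval;

	/* ... process blk.addr / blk.length here ... */

	return sblock_release(dst, channel, &blk);
}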

#if defined(CONFIG_DEBUG_FS)
static int sblock_debug_show(struct seq_file *m, void *private)
{
	struct sblock_mgr *sblock = NULL;
	struct sblock_ring  *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	int i, j;

	for (i = 0; i < SIPC_ID_NR; i++) {
		for (j = 0; j < SMSG_CH_NR; j++) {
			sblock = sblocks[i][j];
			if (!sblock) {
				continue;
			}
			ring = sblock->ring;
			ringhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->ring);
			poolhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->pool);

			seq_printf(m, "sblock dst 0x%0x, channel: 0x%0x, state: %d, smem_virt: 0x%0x, smem_addr: 0x%0x, smem_size: 0x%0x, txblksz: %d, rxblksz: %d \n",
				sblock->dst, sblock->channel, sblock->state,
				(uint32_t)sblock->smem_virt, sblock->smem_addr,
				sblock->smem_size, sblock->txblksz, sblock->rxblksz );
			seq_printf(m, "sblock ring: txblk_virt :0x%0x, rxblk_virt :0x%0x \n",
				(uint32_t)ring->txblk_virt, (uint32_t)ring->rxblk_virt);
			seq_printf(m, "sblock ring header: rxblk_addr :0x%0x, rxblk_rdptr :0x%0x, rxblk_wrptr :0x%0x, rxblk_size :%d, rxblk_count :%d, rxblk_blks: 0x%0x \n",
				ringhd->rxblk_addr, ringhd->rxblk_rdptr,
				ringhd->rxblk_wrptr, ringhd->rxblk_size,
				ringhd->rxblk_count, ringhd->rxblk_blks);
			seq_printf(m, "sblock ring header: txblk_addr :0x%0x, txblk_rdptr :0x%0x, txblk_wrptr :0x%0x, txblk_size :%d, txblk_count :%d, txblk_blks: 0x%0x \n",
				ringhd->txblk_addr, ringhd->txblk_rdptr,
				ringhd->txblk_wrptr, ringhd->txblk_size,
				ringhd->txblk_count, ringhd->txblk_blks );
			seq_printf(m, "sblock pool header: rxblk_addr :0x%0x, rxblk_rdptr :0x%0x, rxblk_wrptr :0x%0x, rxblk_size :%d, rxpool_count :%d, rxblk_blks: 0x%0x \n",
				poolhd->rxblk_addr, poolhd->rxblk_rdptr,
				poolhd->rxblk_wrptr, poolhd->rxblk_size,
				(int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr),
				poolhd->rxblk_blks);
			seq_printf(m, "sblock pool header: txblk_addr :0x%0x, txblk_rdptr :0x%0x, txblk_wrptr :0x%0x, txblk_size :%d, txpool_count :%d, txblk_blks: 0x%0x \n",
				poolhd->txblk_addr, poolhd->txblk_rdptr,
				poolhd->txblk_wrptr, poolhd->txblk_size,
				(int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr),
				poolhd->txblk_blks );
		}
	}
	return 0;
}

static int sblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, sblock_debug_show, inode->i_private);
}

static const struct file_operations sblock_debug_fops = {
	.open = sblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

int sblock_init_debugfs(void *root)
{
	if (!root)
		return -ENXIO;
	debugfs_create_file("sblock", S_IRUGO, (struct dentry *)root, NULL, &sblock_debug_fops);
	return 0;
}

#endif /* CONFIG_DEBUG_FS */

EXPORT_SYMBOL(sblock_put);
EXPORT_SYMBOL(sblock_create);
EXPORT_SYMBOL(sblock_destroy);
EXPORT_SYMBOL(sblock_register_notifier);
EXPORT_SYMBOL(sblock_get);
EXPORT_SYMBOL(sblock_send);
EXPORT_SYMBOL(sblock_send_prepare);
EXPORT_SYMBOL(sblock_send_finish);
EXPORT_SYMBOL(sblock_receive);
EXPORT_SYMBOL(sblock_get_free_count);
EXPORT_SYMBOL(sblock_release);

MODULE_AUTHOR("Chen Gaopeng");
MODULE_DESCRIPTION("SIPC/SBLOCK driver");
MODULE_LICENSE("GPL");
void smsm_print_sleep_info(unsigned wakeup_reason_only)
{
	unsigned long flags;
	uint32_t *ptr;
#if defined(CONFIG_MSM_N_WAY_SMD)
	struct msm_dem_slave_data *smd_int_info;
#else
	struct tramp_gpio_smem *gpio;
	struct smsm_interrupt_info *int_info;
#endif

	spin_lock_irqsave(&smem_lock, flags);

	if (!wakeup_reason_only) {
		ptr = smem_alloc(SMEM_SMSM_SLEEP_DELAY, sizeof(*ptr));
		if (ptr)
			pr_info("SMEM_SMSM_SLEEP_DELAY: %x\n", *ptr);

		ptr = smem_alloc(SMEM_SMSM_LIMIT_SLEEP, sizeof(*ptr));
		if (ptr)
			pr_info("SMEM_SMSM_LIMIT_SLEEP: %x\n", *ptr);

		ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
		if (ptr)
			pr_info("SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
	}
#if !defined(CONFIG_MSM_N_WAY_SMD)
	int_info = smem_alloc(SMEM_SMSM_INT_INFO, sizeof(*int_info));
	if (int_info)
		pr_info("SMEM_SMSM_INT_INFO %x %x %x\n",
			int_info->interrupt_mask,
			int_info->pending_interrupts,
			int_info->wakeup_reason);

	gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
	if (gpio) {
		int i;
		if (!wakeup_reason_only) {
			for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++)
				pr_info("SMEM_GPIO_INT: %d: e %x d %x p %x\n",
					i, gpio->enabled[i], gpio->detection[i],
					gpio->polarity[i]);
		}
		for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
			pr_info("SMEM_GPIO_INT: %d: f %d: %d %d...\n",
				i, gpio->num_fired[i], gpio->fired[i][0],
				gpio->fired[i][1]);
	}
#else
	smd_int_info = smem_find(SMEM_APPS_DEM_SLAVE_DATA, sizeof(*smd_int_info));
	if (smd_int_info) {
		pr_info("SMEM_APPS_DEM_SLAVE_DATA: %ds %x %x %x %x %x %x %x %s %x\n",
			smd_int_info->sleep_time / 32768,
			smd_int_info->interrupt_mask,
			smd_int_info->resources_used,
			smd_int_info->reserved1,
			smd_int_info->wakeup_reason,
			smd_int_info->pending_interrupts,
			smd_int_info->rpc_prog,
			smd_int_info->rpc_proc,
			smd_int_info->smd_port_name,
			smd_int_info->reserved2);
	}
#endif
	spin_unlock_irqrestore(&smem_lock, flags);
}
static void smem_vendor1_init(void)
{
    smem_id_vendor1_ptr =  (oem_pm_smem_vendor1_data_type*)smem_alloc(SMEM_ID_VENDOR1,
                                                                      sizeof(oem_pm_smem_vendor1_data_type));
}
Example #23
0
static int diagchar_ioctl(struct inode *inode, struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int i, j, count_entries = 0, temp;
	int success = -1;

	if (iocmd == DIAG_IOCTL_COMMAND_REG) {
		struct bindpkt_params_per_process *pkt_params =
			 (struct bindpkt_params_per_process *) ioarg;

		for (i = 0; i < diag_max_registration; i++) {
			if (driver->table[i].process_id == 0) {
				success = 1;
				driver->table[i].cmd_code =
					pkt_params->params->cmd_code;
				driver->table[i].subsys_id =
					pkt_params->params->subsys_id;
				driver->table[i].cmd_code_lo =
					pkt_params->params->cmd_code_hi;
				driver->table[i].cmd_code_hi =
					pkt_params->params->cmd_code_lo;
				driver->table[i].process_id = current->tgid;
				count_entries++;
				if (pkt_params->count > count_entries)
					pkt_params->params++;
				else
					return success;
			}
		}
		if (i < diag_threshold_registration) {
			/* Increase table size by amount required */
			diag_max_registration += pkt_params->count -
							 count_entries;
			/* Make sure size doesn't go beyond threshold */
			if (diag_max_registration > diag_threshold_registration)
				diag_max_registration =
						 diag_threshold_registration;
			{
				void *new_table = krealloc(driver->table,
						 diag_max_registration*sizeof(struct
						 diag_master_table), GFP_KERNEL);
				/* keep the old table if the reallocation fails */
				if (!new_table)
					return -ENOMEM;
				driver->table = new_table;
			}
			for (j = i; j < diag_max_registration; j++) {
				success = 1;
				driver->table[j].cmd_code = pkt_params->
							params->cmd_code;
				driver->table[j].subsys_id = pkt_params->
							params->subsys_id;
				driver->table[j].cmd_code_lo = pkt_params->
							params->cmd_code_hi;
				driver->table[j].cmd_code_hi = pkt_params->
							params->cmd_code_lo;
				driver->table[j].process_id = current->tgid;
				count_entries++;
				if (pkt_params->count > count_entries)
					pkt_params->params++;
				else
					return success;
			}
		} else
			pr_err("Max size reached, Pkt Registration failed for"
						" Process %d", current->tgid);

		success = 0;
	} else if (iocmd == DIAG_IOCTL_GET_DELAYED_RSP_ID) {
		struct diagpkt_delay_params *delay_params =
					(struct diagpkt_delay_params *) ioarg;

		if ((delay_params->rsp_ptr) &&
		 (delay_params->size == sizeof(delayed_rsp_id)) &&
				 (delay_params->num_bytes_ptr)) {
			*((uint16_t *)delay_params->rsp_ptr) =
				DIAGPKT_NEXT_DELAYED_RSP_ID(delayed_rsp_id);
			*(delay_params->num_bytes_ptr) = sizeof(delayed_rsp_id);
			success = 0;
		}
	} else if (iocmd == DIAG_IOCTL_LSM_DEINIT) {
		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i] == current->tgid)
				break;
		if (i == driver->num_clients)
			return -EINVAL;
		driver->data_ready[i] |= DEINIT_TYPE;
		wake_up_interruptible(&driver->wait_q);
		success = 1;
	} else if (iocmd == DIAG_IOCTL_SWITCH_LOGGING) {
		mutex_lock(&driver->diagchar_mutex);
		temp = driver->logging_mode;
		driver->logging_mode = (int)ioarg;
		driver->logging_process_id = current->tgid;
		mutex_unlock(&driver->diagchar_mutex);
		if (temp == USB_MODE && driver->logging_mode == NO_LOGGING_MODE)
			diagfwd_disconnect();
		else if (temp == NO_LOGGING_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
		else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
							== NO_LOGGING_MODE) {
			driver->in_busy = 1;
			driver->in_busy_qdsp = 1;
		} else if (temp == NO_LOGGING_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			driver->in_busy = 0;
			driver->in_busy_qdsp = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
		} else if (temp == USB_MODE && driver->logging_mode
							== MEMORY_DEVICE_MODE) {
			diagfwd_disconnect();
			driver->in_busy = 0;
			driver->in_busy_qdsp = 0;
			/* Poll SMD channels to check for data*/
			if (driver->ch)
				queue_work(driver->diag_wq,
					 &(driver->diag_read_smd_work));
			if (driver->chqdsp)
				queue_work(driver->diag_wq,
					&(driver->diag_read_smd_qdsp_work));
		} else if (temp == MEMORY_DEVICE_MODE && driver->logging_mode
								== USB_MODE)
			diagfwd_connect();
		success = 1;
	}
#ifdef CONFIG_FIH_FXX
#if SD_CARD_DOWNLOAD
    else if (iocmd == DIAG_IOCTL_WRITE_BUFFER) 
    {
        struct diagpkt_ioctl_param pkt;
        uint8_t *pBuf = NULL;
        if (copy_from_user(&pkt, (void __user *)ioarg, sizeof(pkt)))
        {
            return -EFAULT;
        }
        if (pkt.Len > 4096)
            return -EINVAL;
        if ((pBuf = kzalloc(4096, GFP_KERNEL)) == NULL)
            return -ENOMEM;

        /* pkt.pPacket is a user-space pointer, so copy it in safely */
        if (copy_from_user(pBuf, (void __user *)pkt.pPacket, pkt.Len))
        {
            kfree(pBuf);
            return -EFAULT;
        }
        //print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, pBuf, pkt.Len, 0);
        diag_write_to_smd(pBuf, pkt.Len);
        kfree(pBuf);
        return 0;
    }
    else if (iocmd == DIAG_IOCTL_READ_BUFFER) 
    {
        struct diagpkt_ioctl_param pkt;
        struct diagpkt_ioctl_param *ppkt;
        uint8_t *pBuf = NULL;
        if (copy_from_user(&pkt, (void __user *)ioarg, sizeof(pkt)))
        {
            return -EFAULT;
        }

        if ((pBuf = kzalloc(4096, GFP_KERNEL)) == NULL)
            return -EFAULT;

        ppkt = (struct diagpkt_ioctl_param *)ioarg;
        //printk("%s: ppkt->pPacket %p ioarg 0x%lx pkt.pPacket %p \n", __func__, ppkt->pPacket, ioarg, pkt.pPacket);
        if (diag_read_from_smd(pBuf, &(pkt.Len)) < 0)
        {
            kfree(pBuf);
            return -EFAULT;
        }
        //printk("pkt.Len =%d\n", pkt.Len);
        
        if (copy_to_user((void __user *) &ppkt->Len, &pkt.Len, sizeof(pkt.Len)))
        {
            kfree(pBuf);
            return -EFAULT;
        }
        if (copy_to_user((void __user *) pkt.pPacket, pBuf, pkt.Len))
        {
            kfree(pBuf);
            return -EFAULT;
        }
        kfree(pBuf);
        return 0;
    }
    else if (iocmd == DIAG_IOCTL_PASS_FIRMWARE_LIST)
    {
        FirmwareList FL;
        FirmwareList *pFL = NULL;
        int size;
        if (copy_from_user(&FL, (void __user *)ioarg, sizeof(FL)))
        {
            return -EFAULT;
        }

    printk("update flag 0x%X\n",FL.iFLAG);
    printk("image %s\n",FL.pCOMBINED_IMAGE);
    printk("0x%08X 0x%08X\n", FL.aARMPRG_BIN[0], FL.aARMPRG_BIN[1]);
    printk("0x%08X 0x%08X\n", FL.aPARTITION[0], FL.aPARTITION[1]);
    printk("0x%08X 0x%08X\n", FL.aANDROID_BOOT[0], FL.aANDROID_BOOT[1]);
        // Fill smem_mem_type
        proc_comm_alloc_sd_dl_smem(0);
    
        size = sizeof(FirmwareList);
        //pFL = smem_get_entry(SMEM_SD_IMG_UPGRADE_STATUS, &size);
        pFL = smem_alloc(SMEM_SD_IMG_UPGRADE_STATUS, size);
    //printk("pFL 0x%08X 0x%08X\n", (uint32_t)pFL, size);
        if (pFL == NULL)
            return -EFAULT;
        memcpy(pFL, &FL, sizeof(FirmwareList));
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,16, 1, pFL, size, 0);
    printk("\nARMPRGBIN 0x%08X 0x%08X\n", pFL->aARMPRG_BIN[0], pFL->aARMPRG_BIN[1]);
    printk("0x%08X 0x%08X\n", pFL->aPARTITION[0], pFL->aPARTITION[1]);
    printk("0x%08X 0x%08X\n", pFL->aANDROID_BOOT[0], pFL->aANDROID_BOOT[1]);
        // Notify modem to write firmware list to NAND flash
        if (proc_comm_alloc_sd_dl_smem(1))
            return -EFAULT;

        return 0;
    }
    else if (iocmd == DIAG_IOCTL_GET_PART_TABLE_FROM_SMEM)
    {
        struct flash_partition_table *partition_table;
        //struct flash_partition_entry *part_entry;
        //struct mtd_partition *ptn = msm_nand_partitions;
        //char *name = msm_nand_names;
        //int part;

        partition_table = (struct flash_partition_table *)
            smem_alloc(SMEM_AARM_PARTITION_TABLE,
        	       sizeof(struct flash_partition_table));

        if (!partition_table) {
            printk(KERN_WARNING "%s: no flash partition table in shared "
                   "memory\n", __func__);
            return -ENOENT;
        }

        if ((partition_table->magic1 != (u32) FLASH_PART_MAGIC1) ||
            (partition_table->magic2 != (u32) FLASH_PART_MAGIC2) ||
            (partition_table->version != (u32) FLASH_PARTITION_VERSION))
        {
        	printk(KERN_WARNING "%s: version mismatch -- magic1=%#x, "
        	       "magic2=%#x, version=%#x\n", __func__,
        	       partition_table->magic1,
        	       partition_table->magic2,
        	       partition_table->version);
        	return -EFAULT;
        }
        if (copy_to_user((void __user *) ioarg, partition_table, sizeof(struct flash_partition_table)))
        {
            return -EFAULT;
        }

        return 0;
    }
    #endif	//SD_CARD_DOWNLOAD
#endif	//CONFIG_FIH_FXX
	return success;
}
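
/*
 * A hypothetical user-space sketch for the DIAG_IOCTL_GET_DELAYED_RSP_ID
 * branch above, assuming the ioctl code and struct diagpkt_delay_params
 * (fields rsp_ptr, size, num_bytes_ptr) are exported to user space by the
 * diag headers, and that the delayed response id is 16 bits wide as the
 * cast in the driver suggests.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int get_delayed_rsp_id(int diag_fd)
{
	uint16_t rsp_id = 0;
	int num_bytes = 0;
	struct diagpkt_delay_params params = {
		.rsp_ptr = &rsp_id,
		.size = sizeof(rsp_id),
		.num_bytes_ptr = &num_bytes,
	};

	if (ioctl(diag_fd, DIAG_IOCTL_GET_DELAYED_RSP_ID, &params) < 0)
		return -1;

	printf("delayed rsp id = %u (%d bytes)\n", rsp_id, num_bytes);
	return 0;
}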
Example #24
0
static int testsmem_open(struct inode *inode, struct file *file)
{
  int result;
  long tout = 60000;

  result = msm_proc_comm(PCOM_OEM_001, NULL, NULL);
  msleep(100);

  if (tesmemdriver) {

    cmd_ptr = (test_maif_type *)smem_alloc( SMEM_ID_VENDOR2, sizeof(test_maif_type));
    if(cmd_ptr == NULL)
    {
       printk(KERN_INFO "testsmem_open not yet smem allocate \n");
       mdelay(10);
       cmd_ptr = (test_maif_type *)smem_alloc( SMEM_ID_VENDOR2, sizeof(test_maif_type));

      if(cmd_ptr == NULL)
      {
        printk(KERN_INFO "smem allocate err\n");
        return -ENOMEM;
      }
    }
    mutex_lock(&tesmemdriver->testsmem_mutex);

    Acmd      =  (ma_if_command_type *)&cmd_ptr->CmdBuf;
    Adisp_ptr =  (ma_if_disp_type *)&cmd_ptr->DisplayBuf;
    Asio_ptr  =  (ma_if_sio_type *)&cmd_ptr->SioBuf;
    Astatus   =  (int *)&cmd_ptr->Status;
    Areq_cmd  =  (ma_if_ask_type *)&cmd_ptr->Req_cmd;

    memset(Adisp_ptr,0,sizeof(ma_if_disp_type));
    Adisp_ptr->format = MA_IF_DISP_TYPE_14x15;
    Adisp_ptr->valid = TRUE;
    Adisp_ptr->line[0].valid = TRUE;
    memcpy(&Adisp_ptr->line[0].data[0], "               ", MA_DISP_WIDTH15);
    Adisp_ptr->line[1].valid = TRUE;
    memcpy(&Adisp_ptr->line[1].data[0], "               ", MA_DISP_WIDTH15);
    Adisp_ptr->line[2].valid = TRUE;
    memcpy(&Adisp_ptr->line[2].data[0], "               ", MA_DISP_WIDTH15);
    Adisp_ptr->line[3].valid = TRUE;
    memcpy(&Adisp_ptr->line[3].data[0], "               ", MA_DISP_WIDTH15);

    testsmem_diap_print();
    Acmd->cmd = 0;
    *Astatus = TEST_REQUEST;
    Areq_cmd->req_flg = FALSE;
    Areq_cmd->req_cmd_no = 0xFFFFFFFF;
    msleep(50);

    result = testsmem_send_command();

    while(tout > 0)
    {
      if( (*Astatus & TEST_RESP_END) != 0 )
      {
        break;
      }
      tout--;
      mdelay(10);
    }
    msleep(50);

    Acmd->cmd = 1;
    mutex_unlock(&tesmemdriver->testsmem_mutex);
    return 0;
  }
  return -EFAULT;
}
int check_smem_ers_status(void)
{
	struct proc_dir_entry *entry;

	smem_sreset = (uint32_t *)smem_alloc(SMEM_ID_VENDOR0, sizeof(uint64_t)*4);
	
	if((smem_sreset != NULL) && ((*smem_sreset ) != 0))
	{
		printk(KERN_INFO "smem_sreset => addr : 0x%X, value : 0x%X\n", (int)smem_sreset, *smem_sreset);
		smem_sreset_log = kzalloc(100, GFP_KERNEL);
		if (smem_sreset_log == NULL) {
			printk(KERN_ERR "smem_sreset_log allocation failed \n");
			smem_sreset_log_size = 0;
			return 0;
		}

		if (*smem_sreset == 0xDDDEADDD) {
			/* create a proc entry to report the kernel crash */
			printk(KERN_INFO "Kernel Crash\n");

			entry = create_proc_entry("last_kmsg_kernel_crash", S_IFREG | S_IRUGO, NULL);
			if (!entry) {
				printk(KERN_ERR "%s: failed to create proc entry\n", "last_kmsg_kernel_crash");
				kfree(smem_sreset_log);
				smem_sreset_log = NULL;
				smem_sreset_log_size = 0;
				return 0;
			}

			sprintf(smem_sreset_log, "%s value : 0x%X\n", "kernel crash!!", *smem_sreset);
			smem_sreset_log_size = strlen(smem_sreset_log);

			//entry->proc_fops = &smem_sreset_file_ops;
			//entry->size = smem_sreset_log_size;
			return 1;
		} else if (*smem_sreset == 0xDDEAEADD) {
			/* create a proc entry to report the modem crash */
			printk(KERN_INFO "Modem Crash\n");

			entry = create_proc_entry("last_kmsg_modem_crash", S_IFREG | S_IRUGO, NULL);
			if (!entry) {
				printk(KERN_ERR "%s: failed to create proc entry\n", "last_kmsg_modem_crash");
				kfree(smem_sreset_log);
				smem_sreset_log = NULL;
				smem_sreset_log_size = 0;
				return 0;
			}

			sprintf(smem_sreset_log, "%s value : 0x%X\n", "modem crash!!", *smem_sreset);
			smem_sreset_log_size = strlen(smem_sreset_log);

			//entry->proc_fops = &smem_sreset_file_ops;
			//entry->size = smem_sreset_log_size;
			return 1;
		}
	}

	return 0;
}
Example #26
0
static int get_nand_partitions(void)
{
	struct flash_partition_table *partition_table;
	struct flash_partition_entry *part_entry;
	struct mtd_partition *ptn = msm_nand_partitions;
	char *name = msm_nand_names;
	int part;

	if (msm_nand_data.nr_parts)
		return 0;

	partition_table = (struct flash_partition_table *)
	    smem_alloc(SMEM_AARM_PARTITION_TABLE,
		       sizeof(struct flash_partition_table));

	if (!partition_table) {
		printk(KERN_WARNING "%s: no flash partition table in shared "
		       "memory\n", __func__);
		return -ENOENT;
	}

	if ((partition_table->magic1 != (u32) FLASH_PART_MAGIC1) ||
	    (partition_table->magic2 != (u32) FLASH_PART_MAGIC2) ||
	    (partition_table->version != (u32) FLASH_PARTITION_VERSION)) {
		printk(KERN_WARNING "%s: version mismatch -- magic1=%#x, "
		       "magic2=%#x, version=%#x\n", __func__,
		       partition_table->magic1,
		       partition_table->magic2,
		       partition_table->version);
		return -EFAULT;
	}

	msm_nand_data.nr_parts = 0;

	/* Get the LINUX FS partition info */
	for (part = 0; part < partition_table->numparts; part++) {
		part_entry = &partition_table->part_entry[part];

		/* Find a match for the Linux file system partition */
		if (strcmp(part_entry->name, LINUX_FS_PARTITION_NAME) == 0) {
			strcpy(name, part_entry->name);
			ptn->name = name;

			/*TODO: Get block count and size info */
			ptn->offset = part_entry->offset;

			/* For SMEM, -1 indicates remaining space in flash,
			 * but for MTD it is 0
			 */
			if (part_entry->length == (u32)-1)
				ptn->size = 0;
			else
				ptn->size = part_entry->length;

			msm_nand_data.nr_parts = 1;
			msm_nand_data.parts = msm_nand_partitions;

			printk(KERN_INFO "Partition(from smem) %s "
					"-- Offset:%llx Size:%llx\n",
					ptn->name, ptn->offset, ptn->size);

			return 0;
		}
	}

	printk(KERN_WARNING "%s: no partition table found!", __func__);

	return -ENODEV;
}
Example #27
0
asmlinkage long sys_LGE_Dload_SRD (void *req_pkt_ptr, void *rsp_pkt_ptr)
{
    udbp_req_type *req_ptr = (udbp_req_type *) req_pkt_ptr;
    udbp_rsp_type *rsp_ptr = (udbp_rsp_type *) rsp_pkt_ptr;
    //uint16 rsp_len = pkg_len;
    int write_size=0 , mtd_op_result=0;


    // The DIAG_TEST_MODE_F_rsp_type union is larger than the actual response, so shrink it for size-sensitive items
    switch(req_ptr->header.sub_cmd)
    {
        case  SRD_INIT_OPERATION:
printk(KERN_INFO "[SRD] SRD_INIT_OPERATION \n");
            diag_SRD_Init(req_ptr,rsp_ptr);
            break;

        case USERDATA_BACKUP_REQUEST:
printk(KERN_INFO "[SRD] USERDATA_BACKIP_REQUEST \n");
            //remote_rpc_srd_cmmand(req_ptr, rsp_ptr);  //userDataBackUpStart()
            diag_userDataBackUp_entrySet(req_ptr, rsp_ptr, 0);  /* write the info data; the RPC response includes write_sector_counter */

            /* TODO: check rsp_ptr->header.write_sector_counter and handle the case where no NV item is active */
            /* write the shared-RAM data to the eMMC misc partition, as many sectors as were returned */
            load_srd_shard_base=smem_alloc(SMEM_ERR_CRASH_LOG, SIZE_OF_SHARD_RAM);  //384K byte 

            if (load_srd_shard_base == NULL)
            {
                ((udbp_rsp_type*)rsp_ptr)->header.err_code = UDBU_ERROR_CANNOT_COMPLETE;
                printk(KERN_INFO "[SRD] backup req smem alloc fail!!");
                break;
                // return rsp_ptr;
            }

            write_size = req_ptr->nv_counter * 256; /* NV backup sector count */
            printk(KERN_INFO "[SRD] backup req// nv_counter = %d", req_ptr->nv_counter);
            write_size = (req_ptr->header.packet_version / 0x10000) * 256; /* sector count from the upper 16 bits of packet_version */
            //write_size = 512;
            printk(KERN_INFO "[SRD] backup req// pkt_version = %d", (int)req_ptr->header.packet_version);

            if( write_size >SIZE_OF_SHARD_RAM)
            {
                ((udbp_rsp_type*)rsp_ptr)->header.err_code = UDBU_ERROR_CANNOT_COMPLETE;  //hue..
                break;
            }

            load_srd_kernel_base = kmalloc((size_t)write_size, GFP_KERNEL);
            if (load_srd_kernel_base == NULL)
            {
                ((udbp_rsp_type*)rsp_ptr)->header.err_code = UDBU_ERROR_CANNOT_COMPLETE;
                printk(KERN_INFO "[SRD] backup req// kmalloc fail");
                break;
            }

            memcpy(load_srd_kernel_base, load_srd_shard_base, write_size);
            /* srd_bytes_pos_in_emmc+512: the info data has already been written to the first eMMC sector */
            mtd_op_result = lge_write_srd_block(srd_bytes_pos_in_emmc+512, load_srd_kernel_base, write_size);  //512 info data

            if(mtd_op_result!= write_size)
            {
                ((udbp_rsp_type*)rsp_ptr)->header.err_code = UDBU_ERROR_CANNOT_COMPLETE;
                kfree(load_srd_kernel_base);
				printk(KERN_INFO "[SRD] backup req// mtd_op != wtite_size");
                break;
            }

            kfree(load_srd_kernel_base);
            #if 0
            if ( !writeBackUpNVdata( load_srd_base , write_size))
            {
                ((udbp_rsp_type*)rsp_ptr)->header.err_code = UDBU_ERROR_CANNOT_COMPLETE;
                return rsp_ptr;
            }
            #endif
            break;

 

        case GET_DOWNLOAD_INFO :
            break;

        case EXTRA_NV_OPERATION :
            #ifdef LG_FW_SRD_EXTRA_NV
            diag_extraNv_entrySet(req_ptr,rsp_ptr);
            #endif
            break;

        case PRL_OPERATION :
            #ifdef LG_FW_SRD_PRL
            diag_PRL_entrySet(req_ptr,rsp_ptr);
            #endif
            break;

        default :
            rsp_ptr = NULL; //(void *) diagpkt_err_rsp (DIAG_BAD_PARM_F, req_ptr, pkt_len);
            break;
    }

    /* Exception */
    if (rsp_ptr == NULL){
        return FALSE;
    }
	printk(KERN_INFO "[SRD] syscall complete rsp->err_code = %d ", (int)rsp_ptr->header.err_code);

  return TRUE;
}