Example #1
/* audio interrupt handler */
static irqreturn_t twl6040_naudint_handler(int irq, void *data)
{
	static uint64_t tick = 0;
	struct snd_soc_codec *codec = data;
	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
	struct twl6040_jack_data *jack = &priv->hs_jack;
	u8 intid = 0;

	twl6040_i2c_read(TWL6040_REG_INTID, &intid);

	if (intid & TWL6040_THINT)
		dev_alert(codec->dev, "die temp over-limit detection\n");

	if (intid & TWL6040_UNPLUGINT)
	{
		wake_lock_timeout(&priv->wake_lock, 2 * HZ);
		
		tick = jiffies;
		jack->state = WIRED_HEADSET;
		cancel_delayed_work(&priv->hook_work);
		cancel_delayed_work(&priv->hsdet_dwork);
		schedule_delayed_work(&priv->hsdet_dwork, msecs_to_jiffies(200));
	}

	if (intid & TWL6040_PLUGINT)
	{
		u8 val;
		u8 mute = 0;
		
		wake_lock_timeout(&priv->wake_lock, 2 * HZ);
		
#if 1	
		if( twl6040_i2c_read(TWL6040_REG_MICLCTL, &val) == 0 ){
			if( !(val & 0x18) ){
				twl6040_i2c_write(TWL6040_REG_MICLCTL, 0x18); 	
				mute = 1;
			}
		}
		if( twl6040_i2c_read(TWL6040_REG_MICRCTL, &val) == 0 ){
			if( !(val & 0x18) ){
				twl6040_i2c_write(TWL6040_REG_MICRCTL, 0x18); 			
				mute = 1;
			}
		}
		if( mute ) twl6040_i2c_write(TWL6040_REG_AMICBCTL, 0x00); 
#endif
		tick = jiffies;
		jack->state = HEADSET_NONE;
		set_hook_enable(codec, 0);
		cancel_delayed_work(&priv->hook_work);
		cancel_delayed_work(&priv->hsdet_dwork);
		schedule_delayed_work(&priv->hsdet_dwork, msecs_to_jiffies(200));
	}

	if (intid & TWL6040_HOOKINT){
		if( jack->state > 0 && (tick == 0 || jiffies_to_msecs(jiffies-tick) > 500ul) )
		{
			tick = 0;
			printk(KERN_ERR" [AUD] %s - HOOKINT\n", __func__);
			schedule_delayed_work(&priv->hook_work, 0);
		}
	}

	if (intid & TWL6040_HFINT)
		dev_alert(codec->dev, "hf drivers over current detection\n");

	if (intid & TWL6040_VIBINT)
		dev_alert(codec->dev, "vib drivers over current detection\n");

	if (intid & TWL6040_READYINT)
		complete(&priv->ready);
	
	return IRQ_HANDLED;
}
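
For reference, the cancel-then-requeue pattern that the jack-detection paths above rely on reduces to the following minimal sketch; the struct, field, and function names (example_priv, detect_work, example_irq_handler) are illustrative placeholders, not taken from the driver.

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct example_priv {
	struct delayed_work detect_work;	/* re-armed from the ISR below */
};

/*
 * Debounce a noisy plug/unplug interrupt: drop any detection work that
 * is still pending and re-arm it, so the work function runs once,
 * 200 ms after the last interrupt.  cancel_delayed_work() does not
 * sleep, so it is safe to call from hard-irq context.
 */
static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_priv *priv = data;

	cancel_delayed_work(&priv->detect_work);
	schedule_delayed_work(&priv->detect_work, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}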
Example #2
void wl1271_scan_stm(struct wl1271 *wl)
{
	int ret = 0;

	switch (wl->scan.state) {
	case WL1271_SCAN_STATE_IDLE:
		break;

	case WL1271_SCAN_STATE_2GHZ_ACTIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, false,
				       wl->conf.tx.basic_rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_2GHZ_PASSIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true,
				       wl->conf.tx.basic_rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			if (wl->enable_11a)
				wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
			else
				wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_ACTIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, false,
				       wl->conf.tx.basic_rate_5);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_PASSIVE:
		ret = wl1271_scan_send(wl, IEEE80211_BAND_5GHZ, true,
				       wl->conf.tx.basic_rate_5);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl);
		}

		break;

	case WL1271_SCAN_STATE_DONE:
		wl->scan.failed = false;
		cancel_delayed_work(&wl->scan_complete_work);
		ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
					     msecs_to_jiffies(0));
		break;

	default:
		wl1271_error("invalid scan state");
		break;
	}

	if (ret < 0) {
		cancel_delayed_work(&wl->scan_complete_work);
		ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
					     msecs_to_jiffies(0));
	}
}
Example #3
void nfs_release_automount_timer(void)
{
	if (list_empty(&nfs_automount_list))
		cancel_delayed_work(&nfs_automount_task);
}
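
One point worth noting about the one-liner above: cancel_delayed_work() returns immediately and only removes work that has not started running yet. On paths that must guarantee the work function has finished (module exit, device removal), cancel_delayed_work_sync() is the variant to use. A minimal sketch with placeholder names (automount_task, automount_expiry, example_shutdown):

#include <linux/workqueue.h>

static void automount_expiry(struct work_struct *work);
static DECLARE_DELAYED_WORK(automount_task, automount_expiry);

static void automount_expiry(struct work_struct *work)
{
	/* ... walk and expire automounted mounts here ... */
}

/*
 * Teardown path: cancel_delayed_work() would only drop a timer that
 * has not fired yet; the _sync variant also waits for a handler that
 * is already executing, so nothing touches freed data afterwards.
 */
static void example_shutdown(void)
{
	cancel_delayed_work_sync(&automount_task);
}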
Example #4
File: brcm_headset.c Project: vM00/xm01
static void input_work_func(struct work_struct *work)
{
	int adc_value = -1;
	int val = 0;
	ktime_t temptime;

	adc_value = auxadc_access(2);
	val = readl(io_p2v(REG_ANACR12));
//	printk("%s: REG_ANACR2=%d, REG_ANACR12=%d\n", __func__,adc_value , val);

	if(val >= KEY_PRESS_THRESHOLD && adc_value >= KEY1_THRESHOLD_L && adc_value < KEY3_THRESHOLD_U)
	{
		temptime = ktime_get();
		temptime = ktime_sub(temptime, mic.hsbtime);

		if(temptime.tv.nsec < VALID_RELEASE_REF_TIME && mic.keypressing == PRESS)
		{
			if ( adc_value >= KEY1_THRESHOLD_L && adc_value < KEY1_THRESHOLD_U )
			{
				mic.key_count[0]++;
				printk ("KEY_BCM_HEADSET_BUTTON \n");
			}
			else if ( adc_value >= KEY2_THRESHOLD_L && adc_value < KEY2_THRESHOLD_U ) 
			{
				mic.key_count[1]++;
				printk ("KEY_VOLUMEUP \n");
			}
			else if ( adc_value >= KEY3_THRESHOLD_L && adc_value < KEY3_THRESHOLD_U ) 
			{
				mic.key_count[2]++;
				printk ("KEY_VOLUMEDOWN \n");
			}
		}
		else
		{
			if(mic.keypressing == PRESS && (mic.key_count[0] + mic.key_count[1] + mic.key_count[2]))
			{
				input_report_key(mic.headset_button_idev, Return_valid_key(mic.key_count), PRESS);
				input_sync(mic.headset_button_idev);

				set_button(1); 
				mic.keypressing = RELEASE;
			}
		}

		cancel_delayed_work(&(mic.input_work));
		queue_delayed_work(mic.headset_workqueue, &(mic.input_work), KEY_PRESS_REF_TIME);
	}
	else
	{
		if(mic.keypressing == RELEASE && (mic.key_count[0] + mic.key_count[1] + mic.key_count[2]))
		{			
			printk ("%s: RELEASE key_count [%d, %d, %d] \n", __func__,  mic.key_count[0], mic.key_count[1], mic.key_count[2]);
			input_report_key(mic.headset_button_idev, Return_valid_key(mic.key_count), RELEASE);
			input_sync(mic.headset_button_idev);
		}
		else
		{
			printk("%s: NO PRESS\n",  __func__);
		}

		if(FactoryMode == DISABLE)
		{
			board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE | SYSCFG_DISABLE);
			sync_use_mic = DISABLE;		
		}
		
		set_button(0); 
		mic.keypressing = NONE;
	}
}
Example #5
static int rtl_op_config( struct ieee80211_hw *hw, u32 changed )
{
	struct rtl_priv *rtlpriv = rtl_priv( hw );
	struct rtl_phy *rtlphy = &( rtlpriv->phy );
	struct rtl_mac *mac = rtl_mac( rtl_priv( hw ) );
	struct rtl_ps_ctl *ppsc = rtl_psc( rtl_priv( hw ) );
	struct ieee80211_conf *conf = &hw->conf;

	if ( mac->skip_scan )
		return 1;

	mutex_lock( &rtlpriv->locks.conf_mutex );
	if ( changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL ) {	/*BIT( 2 )*/
		RT_TRACE( rtlpriv, COMP_MAC80211, DBG_LOUD,
			 "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n" );
	}

	/*For IPS */
	if ( changed & IEEE80211_CONF_CHANGE_IDLE ) {
		if ( hw->conf.flags & IEEE80211_CONF_IDLE )
			rtl_ips_nic_off( hw );
		else
			rtl_ips_nic_on( hw );
	} else {
		/*
		 * Although the RF-off state may not have been caused by IPS,
		 * the reason is checked in the set_rf_power_state function.
		 */
		if ( unlikely( ppsc->rfpwr_state == ERFOFF ) )
			rtl_ips_nic_on( hw );
	}

	/*For LPS */
	if ( changed & IEEE80211_CONF_CHANGE_PS ) {
		cancel_delayed_work( &rtlpriv->works.ps_work );
		cancel_delayed_work( &rtlpriv->works.ps_rfon_wq );
		if ( conf->flags & IEEE80211_CONF_PS ) {
			rtlpriv->psc.sw_ps_enabled = true;
			/* A short sleep here is required, or we may receive a
			 * beacon and put mac80211 into the wrong PS state;
			 * the power-save nullfunc would then fail to send and
			 * packets would be lost.  The sleep must be quick but
			 * not immediate, because an immediate one makes the
			 * nullfunc sent by mac80211 fail as well; a 5 ms delay
			 * has been tested to work well. */
			if ( !rtlpriv->psc.multi_buffered )
				queue_delayed_work( rtlpriv->works.rtl_wq,
						&rtlpriv->works.ps_work,
						MSECS( 5 ) );
		} else {
			rtl_swlps_rf_awake( hw );
			rtlpriv->psc.sw_ps_enabled = false;
		}
	}

	if ( changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS ) {
		RT_TRACE( rtlpriv, COMP_MAC80211, DBG_LOUD,
			 "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
			 hw->conf.long_frame_max_tx_count );
		mac->retry_long = hw->conf.long_frame_max_tx_count;
		mac->retry_short = hw->conf.long_frame_max_tx_count;
		rtlpriv->cfg->ops->set_hw_reg( hw, HW_VAR_RETRY_LIMIT,
					      ( u8 * ) ( &hw->conf.
						      long_frame_max_tx_count ) );
	}

	if ( changed & IEEE80211_CONF_CHANGE_CHANNEL ) {
		struct ieee80211_channel *channel = hw->conf.chandef.chan;
		u8 wide_chan = ( u8 ) channel->hw_value;

		if ( mac->act_scanning )
			mac->n_channels++;

		if ( rtlpriv->dm.supp_phymode_switch &&
		    mac->link_state < MAC80211_LINKED &&
		    !mac->act_scanning ) {
			if ( rtlpriv->cfg->ops->chk_switch_dmdp )
				rtlpriv->cfg->ops->chk_switch_dmdp( hw );
		}

		/*
		 * While scanning we must switch back to
		 * current_network.chan, so the channel is set even when
		 * set_chan == current_network.chan.
		 * mac80211 reports wrong bw40 info for a Cisco 1253 in
		 * bw20 mode, so the bandwidth is corrected here based on
		 * the UPPER/LOWER prime channel offset.
		 */
		switch ( cfg80211_get_chandef_type( &hw->conf.chandef ) ) {
		case NL80211_CHAN_HT20:
		case NL80211_CHAN_NO_HT:
			/* SC */
			mac->cur_40_prime_sc =
				PRIME_CHNL_OFFSET_DONT_CARE;
			rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
			mac->bw_40 = false;
			break;
		case NL80211_CHAN_HT40MINUS:
			/* SC */
			mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
			rtlphy->current_chan_bw =
				HT_CHANNEL_WIDTH_20_40;
			mac->bw_40 = true;

			/*wide channel */
			wide_chan -= 2;

			break;
		case NL80211_CHAN_HT40PLUS:
			/* SC */
			mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
			rtlphy->current_chan_bw =
				HT_CHANNEL_WIDTH_20_40;
			mac->bw_40 = true;

			/*wide channel */
			wide_chan += 2;

			break;
		default:
			mac->bw_40 = false;
			RT_TRACE( rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n" );
			break;
		}

		if ( wide_chan <= 0 )
			wide_chan = 1;

		/* While scanning, before we go off-channel we may send a
		 * ps = 1 null frame to the AP and then quickly a ps = 0 null,
		 * but the first null may have caused the AP to queue lots of
		 * packets in its hw tx buffer.  Those packets must be
		 * transmitted before we go off-channel, so delay a bit longer
		 * to let the AP flush them; otherwise the AP may disassociate
		 * us or tear down the BA session.
		 */
		if ( rtlpriv->mac80211.offchan_delay ) {
			rtlpriv->mac80211.offchan_delay = false;
			mdelay( 50 );
		}
		rtlphy->current_channel = wide_chan;

		rtlpriv->cfg->ops->switch_channel( hw );
		rtlpriv->cfg->ops->set_channel_access( hw );
		rtlpriv->cfg->ops->set_bw_mode( hw,
				cfg80211_get_chandef_type( &hw->conf.chandef ) );
	}

	mutex_unlock( &rtlpriv->locks.conf_mutex );

	return 0;
}
Example #6
//extern int resource_get_level(const char *name);
static void battery_monitor_work_handler( struct work_struct *work )
// ----------------------------------------------------------------------------
// Description    : 
// Input Argument :  
// Return Value   :
{
	int is_full = 0;
	int charge_current_adc;
	struct battery_device_info *di = container_of( work,
							struct battery_device_info,
							battery_monitor_work.work );
	//printk("OPP: %d\n", resource_get_level("vdd1_opp"));
	//printk( KERN_DEBUG "[BR] battery_monitor_work\n" );
#if 0//me open
    
	printk( "[BR] battery monitor [Level:%d, ADC:%d, TEMP.:%d, cable: %d] \n",\
		get_battery_level_ptg(),\
		get_battery_level_adc(),\
		get_system_temperature(TEMP_DEG ),\
		sec_bci.charger.cable_status );
#endif

	//printk("VF: %d\n", _get_t2adc_data_(1));


	/*Monitoring the battery info.*/
	sec_bci.battery.battery_level_ptg = get_battery_level_ptg();
	sec_bci.battery.battery_level_vol= get_battery_level_adc();
	
	if ( device_config->MONITORING_SYSTEM_TEMP )
		sec_bci.battery.battery_temp = get_system_temperature( TEMP_DEG );
	else
		sec_bci.battery.battery_temp = 0;
#if 1 //me add 
	printk( "[BR] monitor BATT.(%d%%, %dmV, %d*)\n", 
			sec_bci.battery.battery_level_ptg,
			sec_bci.battery.battery_level_vol,
			sec_bci.battery.battery_temp );
#endif

	if( !( sec_bci.battery.monitor_field_temp ) 
	 	&& !( sec_bci.battery.monitor_field_rechg_vol ) )
	{
		sec_bci.battery.monitor_duration = MONITOR_DEFAULT_DURATION;
	}
	else
	{
		// Workaround: check the cable status at this point.
		if ( !_cable_status_now_() )
		{
			_battery_state_change_( STATUS_CATEGORY_ETC, 
						ETC_CABLE_IS_DISCONNECTED, 
						CHARGE_DUR_ACTIVE );

			return;
		}

		if ( sec_bci.charger.is_charging && device_config->MONITORING_CHG_CURRENT )
		// in charging && enable monitor_chg_current
		{
			charge_current_adc = get_charging_current_adc_val();
			is_full = check_full_charge_using_chg_current( charge_current_adc );

			if ( is_full )
			{
//				do_fuelgauge_reset();
				_battery_state_change_( STATUS_CATEGORY_CHARGING, 
							POWER_SUPPLY_STATUS_FULL, 
							CHARGE_DUR_ACTIVE );
			}
			else
				battery_monitor_core( CHARGE_DUR_ACTIVE );
		}
		else
		{
			battery_monitor_core( CHARGE_DUR_ACTIVE );
		}
			
	}


#if 0 
	printk( "[BR] monitor BATT.(%d%%, %dmV, %d*)\n", 
			sec_bci.battery.battery_level_ptg,
			sec_bci.battery.battery_level_vol,
			sec_bci.battery.battery_temp );
#endif

	power_supply_changed( &di->sec_battery );
	power_supply_changed( &di->sec_ac );
	power_supply_changed( &di->sec_usb );


	//schedule_delayed_work( &di->battery_monitor_work, 
				//sec_bci.battery.monitor_duration * HZ );

	cancel_delayed_work( &di->battery_monitor_work );
	queue_delayed_work( sec_bci.sec_battery_workq, 
				&di->battery_monitor_work, 
				sec_bci.battery.monitor_duration * HZ);


}
Example #7
static void input_work_func(struct work_struct *work)
{
	int adc_value = -1;
	int val = 0;
	ktime_t temptime;

	adc_value = auxadc_access(2);
	val = readl(io_p2v(REG_ANACR12));
//	printk("%s: REG_ANACR2=%d, REG_ANACR12=%d\n", __func__,adc_value , val);

#ifdef USE_SERVICEMODE
	if(val >= KEY_PRESS_THRESHOLD && adc_value >= Min_threshold && adc_value < Max_threshold)
	{
#else
	if(val >= KEY_PRESS_THRESHOLD && adc_value >= KEY1_THRESHOLD_L && adc_value < KEY3_THRESHOLD_U)
	{
#endif
		temptime = ktime_get();
		temptime = ktime_sub(temptime, mic.hsbtime);

		if(temptime.tv.nsec < VALID_RELEASE_REF_TIME && mic.keypressing == PRESS)
		{
			if ( adc_value >= KEY1_THRESHOLD_L && adc_value < KEY1_THRESHOLD_U )
			{
				mic.key_count[0]++;
				printk ("KEY_BCM_HEADSET_BUTTON \n");
			}
			else if ( adc_value >= KEY2_THRESHOLD_L && adc_value < KEY2_THRESHOLD_U ) 
			{
				mic.key_count[1]++;
				printk ("KEY_VOLUMEUP \n");
			}
			else if ( adc_value >= KEY3_THRESHOLD_L && adc_value < KEY3_THRESHOLD_U ) 
			{
				mic.key_count[2]++;
				printk ("KEY_VOLUMEDOWN \n");
			}
		}
		else
		{
			if(mic.keypressing == PRESS && (mic.key_count[0] + mic.key_count[1] + mic.key_count[2]))
			{
				input_report_key(mic.headset_button_idev, Return_valid_key(mic.key_count), PRESS);
				input_sync(mic.headset_button_idev);

				set_button(1); 
				mic.keypressing = RELEASE;
			}
		}

		cancel_delayed_work(&(mic.input_work));
		queue_delayed_work(mic.headset_workqueue, &(mic.input_work), KEY_PRESS_REF_TIME);
	}
	else
	{
		if(mic.keypressing == RELEASE && (mic.key_count[0] + mic.key_count[1] + mic.key_count[2]))
		{			
			printk ("%s: RELEASE key_count [%d, %d, %d] \n", __func__,  mic.key_count[0], mic.key_count[1], mic.key_count[2]);
			input_report_key(mic.headset_button_idev, Return_valid_key(mic.key_count), RELEASE);
			input_sync(mic.headset_button_idev);
		}
		else
		{
			printk("%s: NO PRESS\n",  __func__);
		}

		if(FactoryMode == DISABLE)
		{
			board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE | SYSCFG_DISABLE);
			sync_use_mic = DISABLE;		
		}
		
		set_button(0); 
		mic.keypressing = NONE;
	}
}

/*------------------------------------------------------------------------------
Function name   : hs_buttonisr
Description     : interrupt handler

Return type     : irqreturn_t
------------------------------------------------------------------------------*/
irqreturn_t hs_buttonisr(int irq, void *dev_id)
{
	struct mic_t *p = &mic;
	int val = 0;
	ktime_t temptime;
	
#ifdef USE_SERVICEMODE
	if(TestMode == ENABLE)
	{
		if(p->headset_state == HEADSET_4_POLE)
			board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE);
		
		return IRQ_NONE;
	}
#endif
	
	if(mic.keypressing == INIT)
	{
		temptime = ktime_get();
		temptime = ktime_sub(temptime, mic.hsbtime);
		if(temptime.tv.sec >= 1 || temptime.tv.nsec >= KEY_INTERRUPT_REF_TIME)
			mic.keypressing = NONE;
		else
		{
		 	printk("%s: Initializing HSB ISR\n", __func__ );
			return IRQ_NONE;
		}
	}	

	if(p->pluging ==  ENABLE || p->keypressing != NONE)
	{
		printk("%s: Headset pluging OR keypressing\n", __func__ );
		return IRQ_NONE;
	}

	val = readl(io_p2v(REG_ANACR12));
	if(val < KEY_PRESS_THRESHOLD)
	{
		printk("%s: False button interrupt\n", __func__ );
		return IRQ_NONE;	
	}
	
	if (p->headset_state == HEADSET_4_POLE)
	{	
		p->hsbtime = ktime_get();
		
		board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE);
		
		memset(mic.key_count, 0, sizeof(mic.key_count));
		p->keypressing = PRESS;
		sync_use_mic = ENABLE;

		cancel_delayed_work(&(mic.input_work));
		queue_delayed_work(mic.headset_workqueue, &(p->input_work), KEY_BEFORE_PRESS_REF_TIME);
	}

	 return IRQ_HANDLED;
}

/* 	1 : SIM_DUAL_FIRST,
	2 : SIM_DUAL_SECOND */
static void getIMSI_work_func(struct work_struct *work)
{
	SIMLOCK_SIM_DATA_t* simdata = NULL; 
	int first = DISABLE;
	int second = DISABLE;

	simdata = GetSIMData(SIM_DUAL_FIRST);
	first = ((simdata == NULL) || (strncmp(simdata->imsi_string, TEST_SIM_IMSI, IMSI_DIGITS) != 0)) ?  DISABLE : ENABLE;
	simdata = GetSIMData(SIM_DUAL_SECOND);
	second = ((simdata == NULL) || (strncmp(simdata->imsi_string, TEST_SIM_IMSI, IMSI_DIGITS) != 0)) ?  DISABLE : ENABLE;

	FactoryMode = (first == ENABLE || second == ENABLE) ? ENABLE : DISABLE;
	printk("%s: Factorymode %d\n", __func__, FactoryMode);

	if(FactoryMode == ENABLE)
	{
		if(mic.headset_state == HEADSET_4_POLE)
			board_sysconfig(SYSCFG_AUXMIC, SYSCFG_ENABLE);
	}
}
Example #8
static int nvhost_pod_estimate_freq(struct devfreq *df,
				    unsigned long *freq)
{
	struct podgov_info_rec *podgov = df->data;
	struct devfreq_dev_status dev_stat;
	struct nvhost_devfreq_ext_stat *ext_stat;
	long delay;
	int current_event;
	int stat;
	ktime_t now;

	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		return stat;

	/* Ensure maximal clock when scaling is disabled */
	if (!podgov->enable) {
		*freq = df->max_freq;
		return 0;
	}

	if (podgov->p_user) {
		*freq = podgov->p_freq_request;
		return 0;
	}

	current_event = DEVICE_IDLE;
	stat = 0;
	now = ktime_get();

	/* Local adjustments (i.e. requests from kernel threads) are
	 * handled here */

	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {

		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;

		/* Do not do unnecessary scaling */
		scaling_limit(df, &podgov->adjustment_frequency);
		if (df->previous_freq == podgov->adjustment_frequency)
			return GET_TARGET_FREQ_DONTSCALE;

		trace_podgov_estimate_freq(df->previous_freq,
			podgov->adjustment_frequency);

		*freq = podgov->adjustment_frequency;
		return 0;
	}

	/* Retrieve extended data */
	ext_stat = dev_stat.private_data;
	if (!ext_stat)
		return -EINVAL;

	current_event = ext_stat->busy;
	*freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	/* Sustain local variables */
	podgov->last_event_type = current_event;
	podgov->total_idle += (dev_stat.total_time - dev_stat.busy_time);
	podgov->last_total_idle += (dev_stat.total_time - dev_stat.busy_time);

	/* update the load estimate based on idle time */
	update_load_estimate(df);

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (podgov->p_use_throughput_hint &&
		ktime_us_delta(now, podgov->last_throughput_hint) < 1000000)
		return GET_TARGET_FREQ_DONTSCALE;

	switch (current_event) {

	case DEVICE_IDLE:
		/* delay idle_max % of 2 * fast_response time (given in
		 * microseconds) */
		*freq = scaling_state_check(df, now);
		delay = (podgov->idle_max * podgov->p_estimation_window)
			/ 500000;
		schedule_delayed_work(&podgov->idle_timer,
			msecs_to_jiffies(delay));
		break;
	case DEVICE_BUSY:
		cancel_delayed_work(&podgov->idle_timer);
		*freq = scaling_state_check(df, now);
		break;
	case DEVICE_UNKNOWN:
		*freq = scaling_state_check(df, now);
		break;

	}

	if (!(*freq) || (*freq == df->previous_freq))
		return GET_TARGET_FREQ_DONTSCALE;

	trace_podgov_estimate_freq(df->previous_freq, *freq);


	return 0;
}
Example #9
static bool fslepdc_send_update(struct mxcfb_update_data *update_data, bool retry)
{
    bool result = false;
    
    if ( update_data )
    {
        unsigned long start_time, stop_time;
        int send_update_err;
        
        // If this isn't a retry...
        //
        if ( !retry )
        {
            // ...cancel any pending retries.
            //
            cancel_delayed_work(&fslepdc_send_update_work);
            
            // But accumulate any pending retry with the new data.
            //
            if ( fslepdc_send_update_retry_counter )
            {
                struct mxcfb_rect old_retry = fslepdc_send_update_retry_data.update_region,
                                  new_retry,
                                  update    = update_data->update_region;
                u32               old_retry_right,
                                  old_retry_bot,
                                  new_retry_right,
                                  new_retry_bot,
                                  update_right,
                                  update_bot;
                
                // First, accumulate the update region.
                //
                old_retry_right  = (old_retry.left + old_retry.width)  - 1;
                old_retry_bot    = (old_retry.top  + old_retry.height) - 1;
                update_right     = (update.left    + update.width)     - 1;
                update_bot       = (update.top     + update.height)    - 1;
                
                new_retry.left   = min(old_retry.left,  update.left);
                new_retry.top    = min(old_retry.top,   update.top);
                new_retry_right  = max(old_retry_right, update_right);
                new_retry_bot    = max(old_retry_bot,   update_bot);
                
                new_retry.width  = (new_retry_right - new_retry.left)  + 1;
                new_retry.height = (new_retry_bot   - new_retry.top)   + 1;
                
                fslepdc_send_update_retry_data.update_region = new_retry;
                
                // Since it's a retry, go for the highest fidelity possible.
                //
                fslepdc_send_update_retry_data.waveform_mode = fslepdc_get_waveform_mode(WF_UPD_MODE_GC);
                fslepdc_send_update_retry_data.update_mode   = UPDATE_MODE_FULL;
                
                // Use the latest marker and temperature.
                //
                fslepdc_send_update_retry_data.update_marker = update_data->update_marker;
                fslepdc_send_update_retry_data.temp          = update_data->temp;
                
                // Copy the retry data back for this attempt.
                //
                *update_data = fslepdc_send_update_retry_data;
            }
        }
        
        // We can get errors sending updates to EPDC if it's not ready to do
        // an update yet.  So, back off and retry a few times here first
        // before scheduling a retry.
        //
        start_time = jiffies; stop_time = start_time + FSLEPDC_SU_TIMEOUT;    

        do
        {
            send_update_err = mxc_epdc_fb_send_update(update_data, NULL);
            
            if ( 0 != send_update_err )
            {
                einkfb_print_error("EPDC_send_update_error=%d:\n", send_update_err);
                schedule_timeout_uninterruptible(FSLEPDC_SU_WAIT);
            }
        }
        while ( (0 != send_update_err) && time_before_eq(jiffies, stop_time) );
        
        if ( time_after(jiffies, stop_time) )
        {
             einkfb_print_crit("EPDC_send_update_timed_out=true:\n");
        }
        else
        {
            char temp_string[16];
            
            if ( TEMP_USE_AMBIENT == update_data->temp )
                strcpy(temp_string, "ambient");
            else
                sprintf(temp_string, "%d", update_data->temp);
            
            einkfb_debug("update_data:\n");
            einkfb_debug("  rect x: %d\n", update_data->update_region.left);
            einkfb_debug("  rect y: %d\n", update_data->update_region.top);
            einkfb_debug("  rect w: %d\n", update_data->update_region.width);
            einkfb_debug("  rect h: %d\n", update_data->update_region.height);
            einkfb_debug("  wfmode: %d\n", update_data->waveform_mode);
            einkfb_debug("  update: %s\n", update_data->update_mode ? "flashing" : "non-flashing");
            einkfb_debug("  marker: %d\n", update_data->update_marker);
            einkfb_debug("  temp:   %s\n", temp_string);
            
            fslepdc_send_update_retry_counter = 0;
            result = true;
        }

        // If our attempt to send an update failed, try it again later.
        //
        if ( !result )
        {
            if ( FSLEPDC_SU_RETRIES > ++fslepdc_send_update_retry_counter )
            {
                // If this isn't a retry, use the current update data.
                //
                if ( !retry )
                    fslepdc_send_update_retry_data = *update_data;
                
                schedule_delayed_work(&fslepdc_send_update_work, FSLEPDC_SU_DELAY);
            }
            else
            {
                einkfb_print_crit("Updates are failing...\n");
            }
        }
    }
    
    return ( result );
}
Example #10
static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
{
	int ret;
	unsigned long end_time;
	bool status_down = false;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;

	switch (cmd) {
	case ESOC_PWR_ON:
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
		mdm_enable_irqs(mdm);
		mdm->init = 1;
		mdm_do_first_power_on(mdm);
		break;
	case ESOC_PWR_OFF:
		mdm_disable_irqs(mdm);
		mdm->debug = 0;
		mdm->ready = false;
		ret = sysmon_send_shutdown(mdm->sysmon_subsys_id);
		device_lock(dev);
		if (ret)
			dev_err(mdm->dev, "Graceful shutdown fail, ret = %d\n",
									ret);
		else {
			dev_info(mdm->dev, "Waiting for status gpio to go low\n");
			status_down = false;
			end_time = jiffies + msecs_to_jiffies(10000);
			while (time_before(jiffies, end_time)) {
				if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
									== 0) {
					dev_dbg(dev, "Status went low\n");
					status_down = true;
					break;
				}
				msleep(100);
			}
			if (status_down)
				dev_info(dev, "shutdown successful\n");
			else
				dev_err(mdm->dev, "graceful poff ipc fail\n");
		}
		/*
		 * Force a shutdown of the mdm. This is required in order
		 * to prevent the mdm from immediately powering back on
		 * after the shutdown
		 */
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
		mdm_power_down(mdm);
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		device_unlock(dev);
		break;
	case ESOC_RESET:
		mdm_toggle_soft_reset(mdm);
		break;
	case ESOC_PREPARE_DEBUG:
		/*
		 * disable all irqs except request irq (pblrdy)
		 * force a reset of the mdm by signaling
		 * an APQ crash, wait till mdm is ready for ramdumps.
		 */
		mdm->ready = false;
		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		dev_info(mdm->dev, "set ap2mdm errfatal to force reset\n");
		msleep(mdm->ramdump_delay_ms);
		break;
	case ESOC_EXE_DEBUG:
		mdm->debug = 1;
		mdm_toggle_soft_reset(mdm);
		/*
		 * wait for ramdumps to be collected
		 * then power down the mdm and switch gpios to booting
		 * config
		 */
		if (!wait_for_completion_timeout(&mdm->debug_done,
				msecs_to_jiffies(mdm->dump_timeout_ms))) {
			dev_err(mdm->dev, "ramdump collection timedout\n");
			mdm->debug = 0;
			return -ETIMEDOUT;
		}
		if (mdm->debug_fail) {
			dev_err(mdm->dev, "unable to collect ramdumps\n");
			mdm->debug = 0;
			return -EIO;
		}
		dev_dbg(mdm->dev, "ramdump collection done\n");
		mdm->debug = 0;
		init_completion(&mdm->debug_done);
		break;
	case ESOC_EXIT_DEBUG:
		/*
		 * Deassert APQ to mdm err fatal
		 * Power on the mdm
		 */
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
		dev_dbg(mdm->dev, "exiting debug state after power on\n");
		mdm->get_restart_reason = true;
	      break;
	default:
	      return -EINVAL;
	};
	return 0;
}
Example #11
File: scan.c Project: 03199618/linux
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
	int ret;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.flags = CMD_SYNC | CMD_WANT_SKB,
	};
	__le32 *status;

	/* Exit immediately with an error when the device is not ready
	 * to receive the scan abort command or is not currently
	 * performing a hardware scan */
	if (!test_bit(STATUS_READY, &priv->status) ||
	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
	    test_bit(STATUS_FW_ERROR, &priv->status))
		return -EIO;

	ret = iwl_dvm_send_cmd(priv, &cmd);
	if (ret)
		return ret;

	status = (void *)cmd.resp_pkt->data;
	if (*status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can occur
		 * simply because we are not in an active scan, which
		 * happens if we send the scan abort before the
		 * microcode has notified us that a scan has
		 * completed. */
		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
			       le32_to_cpu(*status));
		ret = -EIO;
	}

	iwl_free_resp(&cmd);
	return ret;
}

static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
{
	/* check if scan was requested from mac80211 */
	if (priv->scan_request) {
		IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
		ieee80211_scan_completed(priv->hw, aborted);
	}

	priv->scan_type = IWL_SCAN_NORMAL;
	priv->scan_vif = NULL;
	priv->scan_request = NULL;
}

static void iwl_process_scan_complete(struct iwl_priv *priv)
{
	bool aborted;

	lockdep_assert_held(&priv->mutex);

	if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
		return;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	cancel_delayed_work(&priv->scan_check);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
		int err;

		/* Check if mac80211 requested scan during our internal scan */
		if (priv->scan_request == NULL)
			goto out_complete;

		/* If so request a new scan */
		err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
					priv->scan_request->channels[0]->band);
		if (err) {
			IWL_DEBUG_SCAN(priv,
				"failed to initiate pending scan: %d\n", err);
			aborted = true;
			goto out_complete;
		}

		return;
	}

out_complete:
	iwl_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_is_ready_rf(priv))
		return;

	iwlagn_post_scan(priv);
}
Example #12
/*
 * Called by ALSA when the PCM substream is prepared, can set format, sample
 * rate, etc.  This function is non atomic and can be called multiple times,
 * it can refer to the runtime info.
 */
static int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_platform *platform = rtd->platform;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	int ret = 0;

	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);

	if (rtd->dai_link->ops && rtd->dai_link->ops->prepare) {
		ret = rtd->dai_link->ops->prepare(substream);
		if (ret < 0) {
			pr_err("asoc: machine prepare error: %d\n", ret);
			goto out;
		}
	}

	if (platform->driver->ops && platform->driver->ops->prepare) {
		ret = platform->driver->ops->prepare(substream);
		if (ret < 0) {
			dev_err(platform->dev, "platform prepare error: %d\n",
				ret);
			goto out;
		}
	}

	if (codec_dai->driver->ops->prepare) {
		ret = codec_dai->driver->ops->prepare(substream, codec_dai);
		if (ret < 0) {
			dev_err(codec_dai->dev, "DAI prepare error: %d\n",
				ret);
			goto out;
		}
	}

	if (cpu_dai->driver->ops->prepare) {
		ret = cpu_dai->driver->ops->prepare(substream, cpu_dai);
		if (ret < 0) {
			dev_err(cpu_dai->dev, "DAI prepare error: %d\n",
				ret);
			goto out;
		}
	}

	/* cancel any delayed stream shutdown that is pending */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    codec_dai->pop_wait) {
		codec_dai->pop_wait = 0;
		cancel_delayed_work(&rtd->delayed_work);
	}

	snd_soc_dapm_stream_event(rtd, substream->stream, codec_dai,
				  SND_SOC_DAPM_STREAM_START);

	snd_soc_dai_digital_mute(codec_dai, 0);

out:
	mutex_unlock(&rtd->pcm_mutex);
	return ret;
}
Example #13
int
ieee80211softmac_wx_set_essid(struct net_device *net_dev,
			      struct iw_request_info *info,
			      union iwreq_data *data,
			      char *extra)
{
	struct ieee80211softmac_device *sm = ieee80211_priv(net_dev);
	struct ieee80211softmac_network *n;
	struct ieee80211softmac_auth_queue_item *authptr;
	int length = 0;

	mutex_lock(&sm->associnfo.mutex);

	/* Check if we're already associating to this or another network
	 * If it's another network, cancel and start over with our new network
	 * If it's our network, ignore the change, we're already doing it!
	 */
	if((sm->associnfo.associating || sm->associnfo.associated) &&
	   (data->essid.flags && data->essid.length)) {
		/* Get the associating network */
		n = ieee80211softmac_get_network_by_bssid(sm, sm->associnfo.bssid);
		if(n && n->essid.len == data->essid.length &&
		   !memcmp(n->essid.data, extra, n->essid.len)) {
			dprintk(KERN_INFO PFX "Already associating or associated to "MAC_FMT"\n",
				MAC_ARG(sm->associnfo.bssid));
			goto out;
		} else {
			dprintk(KERN_INFO PFX "Canceling existing associate request!\n");
			/* Cancel assoc work */
			cancel_delayed_work(&sm->associnfo.work);
			/* We don't have to do this, but it's a little cleaner */
			list_for_each_entry(authptr, &sm->auth_queue, list)
				cancel_delayed_work(&authptr->work);
			sm->associnfo.bssvalid = 0;
			sm->associnfo.bssfixed = 0;
			flush_scheduled_work();
			sm->associnfo.associating = 0;
			sm->associnfo.associated = 0;
		}
	}


	sm->associnfo.static_essid = 0;
	sm->associnfo.assoc_wait = 0;

	if (data->essid.flags && data->essid.length) {
		length = min((int)data->essid.length, IW_ESSID_MAX_SIZE);
		if (length) {
			memcpy(sm->associnfo.req_essid.data, extra, length);
			sm->associnfo.static_essid = 1;
		}
	}

	/* set our requested ESSID length.
	 * If applicable, we have already copied the data in */
	sm->associnfo.req_essid.len = length;

	sm->associnfo.associating = 1;
	/* queue lower level code to do work (if necessary) */
	schedule_delayed_work(&sm->associnfo.work, 0);
out:
	mutex_unlock(&sm->associnfo.mutex);

	return 0;
}
Example #14
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
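
The inline wrapper above only makes sense together with the work it stops; typically the statistics work function re-arms itself until this cancel is called. Below is a minimal sketch of that counterpart, assuming an adapter with a stats_update_task field; adapter_example, mac_stats_task, and STATS_PERIOD are illustrative names, not taken from the actual driver.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define STATS_PERIOD	(HZ / 2)	/* illustrative sampling period */

struct adapter_example {
	struct delayed_work stats_update_task;
};

/*
 * Periodic MAC statistics refresh: the work function re-queues itself
 * every STATS_PERIOD jiffies, and cancel_delayed_work() on
 * stats_update_task (as in Example #14) is what breaks the cycle.
 */
static void mac_stats_task(struct work_struct *work)
{
	struct adapter_example *ap = container_of(work, struct adapter_example,
						  stats_update_task.work);

	/* ... read and accumulate MAC counters here ... */

	schedule_delayed_work(&ap->stats_update_task, STATS_PERIOD);
}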
Example #15
static inline void set_h3_irda_mode(int mode)
{
	cancel_delayed_work(&set_h3_gpio_expa_work);
	which_speed = mode;
	schedule_work(&set_h3_gpio_expa_work);
}
Example #16
static int omap_cpu_thermal_manager(struct list_head *cooling_list, int temp)
{
	int cpu_temp, zone = NO_ACTION;
	bool set_cooling_level = true;

	omap_gov->sensor_temp = temp;
	cpu_temp = convert_omap_sensor_temp_to_hotspot_temp(temp);

	if (cpu_temp >= OMAP_FATAL_TEMP) {
		omap_fatal_zone(cpu_temp);
		return FATAL_ZONE;
	} else if (cpu_temp >= OMAP_PANIC_TEMP) {
		int temp_upper;

		omap_gov->panic_zone_reached++;
		temp_upper = (((OMAP_FATAL_TEMP - OMAP_PANIC_TEMP) / 4) *
				omap_gov->panic_zone_reached) + OMAP_PANIC_TEMP;
		if (temp_upper >= OMAP_FATAL_TEMP)
			temp_upper = OMAP_FATAL_TEMP;
		omap_thermal_zones[PANIC_ZONE - 1].temp_upper = temp_upper;
		zone = PANIC_ZONE;
	} else if (cpu_temp < (OMAP_PANIC_TEMP - HYSTERESIS_VALUE)) {
		if (cpu_temp >= OMAP_ALERT_TEMP) {
			set_cooling_level = omap_gov->panic_zone_reached == 0;
			zone = ALERT_ZONE;
		} else if (cpu_temp < (OMAP_ALERT_TEMP - HYSTERESIS_VALUE)) {
			if (cpu_temp >= OMAP_MONITOR_TEMP) {
				omap_gov->panic_zone_reached = 0;
				zone = MONITOR_ZONE;
			} else {
				/*
				 * this includes the case where :
				 * (OMAP_MONITOR_TEMP - HYSTERESIS_VALUE) <= T
				 * && T < OMAP_MONITOR_TEMP
				 */
				omap_gov->panic_zone_reached = 0;
				zone = SAFE_ZONE;
			}
		} else {
			/*
			 * this includes the case where :
			 * (OMAP_ALERT_TEMP - HYSTERESIS_VALUE) <= T
			 * && T < OMAP_ALERT_TEMP
			 */
			omap_gov->panic_zone_reached = 0;
			zone = MONITOR_ZONE;
		}
	} else {
		/*
		 * this includes the case where :
		 * (OMAP_PANIC_TEMP - HYSTERESIS_VALUE) <= T < OMAP_PANIC_TEMP
		 */
		set_cooling_level = omap_gov->panic_zone_reached == 0;
		zone = ALERT_ZONE;
	}

	if (zone != NO_ACTION) {
		struct omap_thermal_zone *therm_zone;

		therm_zone = &omap_thermal_zones[zone - 1];
		if (omap_gov->panic_zone_reached)
			start_panic_guard();
		else
			cancel_delayed_work(&omap_gov->decrease_mpu_freq_work);

		if ((omap_gov->prev_zone != zone) || (zone == PANIC_ZONE)) {
			pr_info("%s:sensor %d avg sensor %d pcb ",
				 __func__, temp,
				 omap_gov->avg_cpu_sensor_temp);
			pr_info("%d, delta %d hot spot %d\n",
				 omap_gov->pcb_temp, omap_gov->absolute_delta,
				 cpu_temp);
			pr_info("%s: hot spot temp %d - going into %s zone\n",
				__func__, cpu_temp, therm_zone->name);
			omap_gov->prev_zone = zone;
		}
		omap_enter_zone(therm_zone, set_cooling_level,
				cooling_list, cpu_temp);
	}

	omap_gov->zone_info = zone;

	return zone;
}
Example #17
void _mali_osk_wq_delayed_cancel_work_async( _mali_osk_wq_delayed_work_t *work )
{
	mali_osk_wq_delayed_work_object_t *work_object = (mali_osk_wq_delayed_work_object_t *)work;
	cancel_delayed_work(&work_object->work);
}
Example #18
File: tmu.c Project: yerlirock/void-kernel
static irqreturn_t exynos4x12_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT) & 0x1FFFF;
	pr_info("EXYNOS4x12_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* When multiple interrupts are pending, the interrupts caused
	 * by high temperature are serviced first.
	 */
#if defined(CONFIG_TC_VOLTAGE)
	if (status & INTSTAT_FALL0) {
		info->tmu_state = TMU_STATUS_TC;

		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		exynos_interrupt_enable(info, 0);
	} else if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#else
	if (status & INTSTAT_RISE2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
#endif
	} else if (status & INTSTAT_RISE1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & INTSTAT_RISE0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		return -ENODEV;
	}

	/* read current temperature & save */
	info->last_temperature =  get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return IRQ_HANDLED;
}

static irqreturn_t exynos4210_tmu_irq_handler(int irq, void *id)
{
	struct s5p_tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTSTAT);
	pr_info("EXYNOS4212_tmu interrupt: INTSTAT = 0x%08x\n", status);

	/* When multiple interrupts are pending, the interrupts caused
	 * by high temperature are serviced first.
	 */
	if (status & TMU_INTSTAT2) {
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR2, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT1) {
		info->tmu_state = TMU_STATUS_WARNING;
		__raw_writel(INTCLEAR1, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else if (status & TMU_INTSTAT0) {
		info->tmu_state = TMU_STATUS_THROTTLED;
		__raw_writel(INTCLEAR0, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
	} else {
		pr_err("%s: interrupt error\n", __func__);
		__raw_writel(INTCLEARALL, info->tmu_base + EXYNOS4_TMU_INTCLEAR);
		queue_delayed_work_on(0, tmu_monitor_wq,
			&info->polling, info->sampling_rate / 2);
		return -ENODEV;
	}

	/* read current temperature & save */
	info->last_temperature =  get_curr_temp(info);

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return IRQ_HANDLED;
}

#ifdef CONFIG_TMU_SYSFS
static ssize_t s5p_tmu_show_curr_temp(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct s5p_tmu_info *info = dev_get_drvdata(dev);
	unsigned int curr_temp;

	curr_temp = get_curr_temp(info);
	curr_temp *= 10;
	pr_info("curr temp = %d\n", curr_temp);

	return sprintf(buf, "%d\n", curr_temp);
}
static DEVICE_ATTR(curr_temp, S_IRUGO, s5p_tmu_show_curr_temp, NULL);
#endif

static int __devinit s5p_tmu_probe(struct platform_device *pdev)
{
	struct s5p_tmu_info *info;
	struct s5p_platform_tmu *pdata;
	struct resource *res;
	unsigned int mask = (enable_mask & ENABLE_DBGMASK);
	int ret = 0;

	pr_debug("%s: probe=%p\n", __func__, pdev);

	info = kzalloc(sizeof(struct s5p_tmu_info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "failed to alloc memory!\n");
		ret = -ENOMEM;
		goto err_nomem;
	}
	platform_set_drvdata(pdev, info);

	info->dev = &pdev->dev;
	info->tmu_state = TMU_STATUS_INIT;

	/* set cpufreq limit level at 1st_throttle & 2nd throttle */
	pdata = info->dev->platform_data;
	if (pdata->cpufreq.limit_1st_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_1st_throttle,
				&info->cpufreq_level_1st_throttle);

	if (pdata->cpufreq.limit_2nd_throttle)
		exynos_cpufreq_get_level(pdata->cpufreq.limit_2nd_throttle,
				&info->cpufreq_level_2nd_throttle);

	pr_info("@@@ %s: cpufreq_limit: 1st_throttle: %u, 2nd_throttle = %u\n",
		__func__, info->cpufreq_level_1st_throttle,
		 info->cpufreq_level_2nd_throttle);

#if defined(CONFIG_TC_VOLTAGE) /* Temperature compensated voltage */
	if (exynos_find_cpufreq_level_by_volt(pdata->temp_compensate.arm_volt,
		&info->cpulevel_tc) < 0) {
		dev_err(&pdev->dev, "cpufreq_get_level error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#ifdef CONFIG_BUSFREQ_OPP
	/* To lock bus frequency in OPP mode */
	info->bus_dev = dev_get("exynos-busfreq");
	if (info->bus_dev < 0) {
		dev_err(&pdev->dev, "Failed to get_dev\n");
		ret = -EINVAL;
		goto err_nores;
	}
	if (exynos4x12_find_busfreq_by_volt(pdata->temp_compensate.bus_volt,
		&info->busfreq_tc)) {
		dev_err(&pdev->dev, "get_busfreq_value error\n");
		ret = -EINVAL;
		goto err_nores;
	}
#endif
	pr_info("%s: cpufreq_level[%u], busfreq_value[%u]\n",
		 __func__, info->cpulevel_tc, info->busfreq_tc);
#endif
	/* Map auto_refresh_rate of normal & tq0 mode */
	info->auto_refresh_tq0 =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_TQ0);
	info->auto_refresh_normal =
		get_refresh_interval(FREQ_IN_PLL, AUTO_REFRESH_PERIOD_NORMAL);

	/* To poll current temp, set sampling rate to ONE second sampling */
	info->sampling_rate  = usecs_to_jiffies(1000 * 1000);
	/* 10 sec monitoring */
	info->monitor_period = usecs_to_jiffies(10000 * 1000);

	/* support test mode */
	if (mask & ENABLE_TEST_MODE)
		set_temperature_params(info);
	else
		print_temperature_params(info);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENODEV;
		goto err_nores;
	}

	info->ioarea = request_mem_region(res->start,
			res->end-res->start + 1, pdev->name);
	if (!(info->ioarea)) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -EBUSY;
		goto err_nores;
	}

	info->tmu_base = ioremap(res->start, (res->end - res->start) + 1);
	if (!(info->tmu_base)) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -ENOMEM;
		goto err_nomap;
	}
	tmu_monitor_wq = create_freezable_workqueue(dev_name(&pdev->dev));
	if (!tmu_monitor_wq) {
		pr_info("Creation of tmu_monitor_wq failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	/* To support periodic temperature monitoring */
	if (mask & ENABLE_TEMP_MON) {
		INIT_DELAYED_WORK_DEFERRABLE(&info->monitor,
					exynos4_poll_cur_temp);
		queue_delayed_work_on(0, tmu_monitor_wq, &info->monitor,
			info->monitor_period);
	}
	INIT_DELAYED_WORK_DEFERRABLE(&info->polling, exynos4_handler_tmu_state);

	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "no irq for thermal %d\n", info->irq);
		ret = -EINVAL;
		goto err_irq;
	}

	if (soc_is_exynos4210())
		ret = request_irq(info->irq, exynos4210_tmu_irq_handler,
				IRQF_DISABLED,  "s5p-tmu interrupt", info);
	else
		ret = request_irq(info->irq, exynos4x12_tmu_irq_handler,
				IRQF_DISABLED,  "s5p-tmu interrupt", info);

	if (ret) {
		dev_err(&pdev->dev, "request_irq is failed. %d\n", ret);
		goto err_irq;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_temperature);
	if (ret != 0) {
		pr_err("Failed to create temperature file: %d\n", ret);
		goto err_sysfs_file1;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_tmu_state);
	if (ret != 0) {
		pr_err("Failed to create tmu_state file: %d\n", ret);
		goto err_sysfs_file2;
	}
	ret = device_create_file(&pdev->dev, &dev_attr_lot_id);
	if (ret != 0) {
		pr_err("Failed to create lot id file: %d\n", ret);
		goto err_sysfs_file3;
	}

	ret = tmu_initialize(pdev);
	if (ret)
		goto err_init;

#ifdef CONFIG_TMU_SYSFS
	ret = device_create_file(&pdev->dev, &dev_attr_curr_temp);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to create sysfs group\n");
		goto err_init;
	}
#endif

#ifdef CONFIG_TMU_DEBUG
	ret = device_create_file(&pdev->dev, &dev_attr_print_state);
	if (ret) {
		dev_err(&pdev->dev, "Failed to create tmu sysfs group\n\n");
		return ret;
	}
#endif

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is lower than the TC interrupt
	 * temperature or keeps increasing continuously.
	 */
	if (get_curr_temp(info) <= pdata->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#if defined(CONFIG_VIDEO_MALI400MP)
	if (mali_voltage_lock_init())
		pr_err("Failed to initialize mail voltage lock.\n");
#endif
#endif

	/* initialize tmu_state */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
		info->sampling_rate);

	return ret;

err_init:
	device_remove_file(&pdev->dev, &dev_attr_lot_id);

err_sysfs_file3:
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);

err_sysfs_file2:
	device_remove_file(&pdev->dev, &dev_attr_temperature);

err_sysfs_file1:
	if (info->irq >= 0)
		free_irq(info->irq, info);

err_irq:
	destroy_workqueue(tmu_monitor_wq);

err_wq:
	iounmap(info->tmu_base);

err_nomap:
	release_resource(info->ioarea);
	kfree(info->ioarea);

err_nores:
	kfree(info);
	info = NULL;

err_nomem:
	dev_err(&pdev->dev, "initialization failed.\n");

	return ret;
}

static int __devinit s5p_tmu_remove(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	cancel_delayed_work(&info->polling);
	destroy_workqueue(tmu_monitor_wq);

	device_remove_file(&pdev->dev, &dev_attr_temperature);
	device_remove_file(&pdev->dev, &dev_attr_tmu_state);

	if (info->irq >= 0)
		free_irq(info->irq, info);

	iounmap(info->tmu_base);

	release_resource(info->ioarea);
	kfree(info->ioarea);

	kfree(info);
	info = NULL;

	pr_info("%s is removed\n", dev_name(&pdev->dev));
	return 0;
}

#ifdef CONFIG_PM
static int s5p_tmu_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);

	if (!info)
		return -EAGAIN;

	/* save register value */
	info->reg_save[0] = __raw_readl(info->tmu_base + EXYNOS4_TMU_CONTROL);
	info->reg_save[1] = __raw_readl(info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	info->reg_save[2] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	info->reg_save[3] = __raw_readl(info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);
	info->reg_save[4] = __raw_readl(info->tmu_base + EXYNOS4_TMU_INTEN);

	if (soc_is_exynos4210()) {
		info->reg_save[5] =
			__raw_readl(info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		info->reg_save[6] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		info->reg_save[7] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		info->reg_save[8] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		info->reg_save[9] =
			 __raw_readl(info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		info->reg_save[5] =
			__raw_readl(info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		info->reg_save[6] = __raw_readl(info->tmu_base
					+ EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}
	disable_irq(info->irq);

	return 0;
}

static int s5p_tmu_resume(struct platform_device *pdev)
{
	struct s5p_tmu_info *info = platform_get_drvdata(pdev);
	struct s5p_platform_tmu *data;

	if (!info || !(info->dev))
		return -EAGAIN;

	data = info->dev->platform_data;

	/* restore tmu register value */
	__raw_writel(info->reg_save[0], info->tmu_base + EXYNOS4_TMU_CONTROL);
	__raw_writel(info->reg_save[1],
			info->tmu_base + EXYNOS4_TMU_SAMPLING_INTERNAL);
	__raw_writel(info->reg_save[2],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE0);
	__raw_writel(info->reg_save[3],
			info->tmu_base + EXYNOS4_TMU_COUNTER_VALUE1);

	if (soc_is_exynos4210()) {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4210_TMU_THRESHOLD_TEMP);
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL0);
		__raw_writel(info->reg_save[7],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL1);
		__raw_writel(info->reg_save[8],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL2);
		__raw_writel(info->reg_save[9],
			info->tmu_base + EXYNOS4210_TMU_TRIG_LEVEL3);
	} else {
		__raw_writel(info->reg_save[5],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_RISE);
#if defined(CONFIG_TC_VOLTAGE)
		__raw_writel(info->reg_save[6],
			info->tmu_base + EXYNOS4x12_TMU_TRESHOLD_TEMP_FALL);
#endif
	}
	__raw_writel(info->reg_save[4],
			info->tmu_base + EXYNOS4_TMU_INTEN);

#if defined(CONFIG_TC_VOLTAGE)
	/* S/W workaround for fast service when no interrupt occurs,
	 * e.g. the current temperature is lower than the TC interrupt
	 * temperature or keeps increasing continuously.
	 */
	mdelay(1);
	if (get_curr_temp(info) <= data->ts.start_tc) {
		if (exynos_tc_volt(info, 1) < 0)
			pr_err("TMU: lock error!\n");
	}
#endif
	/* Find out tmu_state after wakeup */
	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling, 0);

	return 0;
}
#else
#define s5p_tmu_suspend	NULL
#define s5p_tmu_resume	NULL
#endif

static struct platform_driver s5p_tmu_driver = {
	.probe		= s5p_tmu_probe,
	.remove		= s5p_tmu_remove,
	.suspend	= s5p_tmu_suspend,
	.resume		= s5p_tmu_resume,
	.driver		= {
		.name   = "s5p-tmu",
		.owner  = THIS_MODULE,
	},
};

static int __init s5p_tmu_driver_init(void)
{
	return platform_driver_register(&s5p_tmu_driver);
}

static void __exit s5p_tmu_driver_exit(void)
{
	platform_driver_unregister(&s5p_tmu_driver);
}
Example #19
int _charger_state_change_( int category, int value, bool is_sleep )
// ----------------------------------------------------------------------------
// Description    : 
// Input Argument : 
// Return Value   :  
{	
	printk( "[BR] cate: %d, value: %d\n", category, value );

	if( category == STATUS_CATEGORY_CABLE )
	{
		switch( value )
		{
		case POWER_SUPPLY_TYPE_BATTERY :
			/*Stop monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = false;

			/*Stop monitoring the temperature*/
			sec_bci.battery.monitor_field_temp = false;

			sec_bci.battery.confirm_full_by_current = 0;
				//sec_bci.battery.confirm_changing_freq = 0;
			sec_bci.battery.confirm_recharge = 0;

			sec_bci.charger.charging_timeout = DEFAULT_CHARGING_TIMEOUT;

			sec_bci.charger.full_charge_dur_sleep = 0x0;
			break;

		case POWER_SUPPLY_TYPE_MAINS :
			sec_bci.charger.charging_timeout = DEFAULT_CHARGING_TIMEOUT;
			wake_lock_timeout( &sec_bc_wakelock , HZ );			
			break;

		case POWER_SUPPLY_TYPE_USB :			
			break;

		default :
			;
		}

		goto Out_Charger_State_Change;
	}
	else if( category == STATUS_CATEGORY_CHARGING )
	{
		switch( value )
		{
		case POWER_SUPPLY_STATUS_UNKNOWN :
		case POWER_SUPPLY_STATUS_NOT_CHARGING :
			//sec_bci.charger.full_charge = false;
			
			/*Stop monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = false;

			if( sec_bci.battery.battery_health != POWER_SUPPLY_HEALTH_OVERHEAT 
				&& sec_bci.battery.battery_health != POWER_SUPPLY_HEALTH_COLD )
			{
				/*Stop monitoring the temperature*/
				sec_bci.battery.monitor_field_temp = false;
			}

			break;

		case POWER_SUPPLY_STATUS_DISCHARGING :
			//sec_bci.charger.full_charge = false;

			break;

		case POWER_SUPPLY_STATUS_FULL :
			//sec_bci.charger.full_charge = true;
			
			/*Start monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = true;

			/*Stop monitoring the temperature*/
			sec_bci.battery.monitor_field_temp = false;

			wake_lock_timeout( &sec_bc_wakelock , HZ );

			break;

		case POWER_SUPPLY_STATUS_CHARGING :
			/*Start monitoring the temperature*/
			sec_bci.battery.monitor_field_temp = true;

			/*Stop monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = false;

			break;

		case POWER_SUPPLY_STATUS_RECHARGING_FOR_FULL :

			//sec_bci.charger.charging_timeout = DEFAULT_RECHARGING_TIMEOUT;
			
			/*Start monitoring the temperature*/
			sec_bci.battery.monitor_field_temp = true;

			/*Stop monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = false;

			break;

		case POWER_SUPPLY_STATUS_RECHARGING_FOR_TEMP :
			/*Start monitoring the temperature*/
			sec_bci.battery.monitor_field_temp = true;

			/*Stop monitoring the batt. level for Re-charging*/
			sec_bci.battery.monitor_field_rechg_vol = false;

			break;

		default :
			break;
		}

	}
	else
	{


	}
    
	if( !is_sleep )
	{
		struct battery_device_info *di;
		struct platform_device *pdev;

		pdev = to_platform_device( this_dev );
		di = platform_get_drvdata( pdev );

		cancel_delayed_work( &di->battery_monitor_work );
		queue_delayed_work( sec_bci.sec_battery_workq, &di->battery_monitor_work, 5 * HZ );	

		power_supply_changed( &di->sec_battery );
		power_supply_changed( &di->sec_ac );
		power_supply_changed( &di->sec_usb );

	}
	else
	{
		release_gptimer12( &batt_gptimer_12 );
		request_gptimer12( &batt_gptimer_12 );
	}

Out_Charger_State_Change :
	
    return 0;
}
Example #20
static int palmas_usb_probe(struct platform_device *pdev)
{
	struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
	struct palmas_platform_data *pdata;
	struct palmas_extcon_platform_data *epdata = NULL;
	struct device_node *node = pdev->dev.of_node;
	struct palmas_usb *palmas_usb;
	int status;
	const char *ext_name = NULL;

	palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
	if (!palmas_usb)
		return -ENOMEM;

	pdata = dev_get_platdata(pdev->dev.parent);
	if (pdata)
		epdata = pdata->extcon_pdata;

	if (node && !epdata) {
		palmas_usb->wakeup = of_property_read_bool(node, "ti,wakeup");
		palmas_usb->enable_id_detection = of_property_read_bool(node,
						"ti,enable-id-detection");
		palmas_usb->enable_vbus_detection = of_property_read_bool(node,
						"ti,enable-vbus-detection");
		status = of_property_read_string(node, "extcon-name", &ext_name);
		if (status < 0)
			ext_name = NULL;
	} else {
		palmas_usb->wakeup = true;
		palmas_usb->enable_id_detection = true;
		palmas_usb->enable_vbus_detection = true;

		if (epdata) {
			palmas_usb->wakeup = epdata->wakeup;
			palmas_usb->enable_id_detection =
					epdata->enable_id_pin_detection;
			palmas_usb->enable_vbus_detection =
					epdata->enable_vbus_detection;
			if (palmas_usb->enable_id_detection)
				palmas_usb->wakeup = true;
			ext_name = epdata->connection_name;
		}
	}

	palmas_usb->palmas = palmas;
	palmas_usb->dev	 = &pdev->dev;
	palmas_usb->cable_debounce_time = 300;

	palmas_usb->id_otg_irq = palmas_irq_get_virq(palmas, PALMAS_ID_OTG_IRQ);
	palmas_usb->id_irq = palmas_irq_get_virq(palmas, PALMAS_ID_IRQ);
	palmas_usb->vbus_otg_irq = palmas_irq_get_virq(palmas,
						PALMAS_VBUS_OTG_IRQ);
	palmas_usb->vbus_irq = palmas_irq_get_virq(palmas, PALMAS_VBUS_IRQ);

	palmas_usb_wakeup(palmas, palmas_usb->wakeup);

	platform_set_drvdata(pdev, palmas_usb);

	palmas_usb->edev.supported_cable = palmas_extcon_cable;
	palmas_usb->edev.mutually_exclusive = mutually_exclusive;
	palmas_usb->edev.name  = (ext_name) ? ext_name : dev_name(&pdev->dev);

	status = extcon_dev_register(&palmas_usb->edev, palmas_usb->dev);
	if (status < 0) {
		dev_err(&pdev->dev, "failed to register extcon device\n");
		return status;
	}

	if (palmas_usb->enable_id_detection) {
		status = devm_request_threaded_irq(palmas_usb->dev,
				palmas_usb->id_irq,
				NULL, palmas_id_irq_handler,
				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
				IRQF_ONESHOT | IRQF_EARLY_RESUME,
				"palmas_usb_id", palmas_usb);
		if (status < 0) {
			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
					palmas_usb->id_irq, status);
			goto fail_extcon;
		}
		status = devm_request_threaded_irq(palmas_usb->dev,
				palmas_usb->id_otg_irq,
				NULL, palmas_id_irq_handler,
				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
				IRQF_ONESHOT | IRQF_EARLY_RESUME,
				"palmas_usb_id-otg", palmas_usb);
		if (status < 0) {
			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
					palmas_usb->id_otg_irq, status);
			goto fail_extcon;
		}
		INIT_DELAYED_WORK(&palmas_usb->cable_update_wq,
				palmas_usb_id_st_wq);
	}

	if (palmas_usb->enable_vbus_detection) {
		status = devm_request_threaded_irq(palmas_usb->dev,
				palmas_usb->vbus_irq, NULL,
				palmas_vbus_irq_handler,
				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
				IRQF_ONESHOT | IRQF_EARLY_RESUME,
				"palmas_usb_vbus", palmas_usb);
		if (status < 0) {
			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
					palmas_usb->vbus_irq, status);
			goto fail_extcon;
		}
	}

	palmas_enable_irq(palmas_usb);
	device_set_wakeup_capable(&pdev->dev, true);
	return 0;

fail_extcon:
	extcon_dev_unregister(&palmas_usb->edev);
	if (palmas_usb->enable_id_detection)
		cancel_delayed_work(&palmas_usb->cable_update_wq);

	return status;
}
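
The probe above defers cable evaluation into cable_update_wq and, on the error path, cancels that work after unregistering the extcon device. A sketch of the debounce pattern it wires up; palmas_id_irq_handler itself is not shown in this listing, so the handler body and teardown below are assumptions with hypothetical names, not the driver's code.

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_usb {					/* hypothetical, mirrors palmas_usb */
	struct delayed_work cable_update_wq;
	int cable_debounce_time;		/* ms */
};

static irqreturn_t my_id_irq_handler(int irq, void *data)
{
	struct my_usb *usb = data;

	/* defer cable-state evaluation past contact bounce */
	schedule_delayed_work(&usb->cable_update_wq,
			      msecs_to_jiffies(usb->cable_debounce_time));
	return IRQ_HANDLED;
}

static void my_usb_teardown(struct my_usb *usb)
{
	/* no debounce work may run once the extcon device is gone */
	cancel_delayed_work_sync(&usb->cable_update_wq);
}
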
/*
 * row_dispatch_requests() - selects the next request to dispatch
 * @q:		requests queue
 * @force:	ignored
 *
 * Return 0 if no requests were moved to the dispatch queue.
 *	  1 otherwise
 *
 */
static int row_dispatch_requests(struct request_queue *q, int force)
{
	struct row_data *rd = (struct row_data *)q->elevator->elevator_data;
	int ret = 0, currq, i;

	currq = rd->curr_queue;

	/*
	 * Find the first unserved queue (with higher priority than currq)
	 * that is not empty
	 */
	for (i = 0; i < currq; i++) {
		if (row_rowq_unserved(rd, i) &&
		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
			row_log_rowq(rd, currq,
				" Preemting for unserved rowq%d", i);
			rd->curr_queue = i;
			row_dispatch_insert(rd);
			ret = 1;
			goto done;
		}
	}

	if (rd->row_queues[currq].rqueue.nr_dispatched >=
	    rd->row_queues[currq].disp_quantum) {
		rd->row_queues[currq].rqueue.nr_dispatched = 0;
		row_log_rowq(rd, currq, "Expiring rqueue");
		ret = row_choose_queue(rd);
		if (ret)
			row_dispatch_insert(rd);
		goto done;
	}

	/* Dispatch from curr_queue */
	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
		/* check idling */
		if (delayed_work_pending(&rd->read_idle.idle_work)) {
			if (force) {
				(void)cancel_delayed_work(
				&rd->read_idle.idle_work);
				row_log_rowq(rd, currq,
					"Canceled delayed work - forced dispatch");
			} else {
				row_log_rowq(rd, currq,
						 "Delayed work pending. Exiting");
				goto done;
			}
		}

		if (!force && queue_idling_enabled[currq] &&
		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
						&rd->read_idle.idle_work,
						rd->read_idle.idle_time)) {
				row_log_rowq(rd, currq,
					     "Work already on queue!");
				pr_err("ROW_BUG: Work already on queue!");
			} else
				row_log_rowq(rd, currq,
				     "Scheduled delayed work. exiting");
			goto done;
		} else {
			row_log_rowq(rd, currq,
				     "Currq empty. Choose next queue");
			ret = row_choose_queue(rd);
			if (!ret)
				goto done;
		}
	}

	ret = 1;
	row_dispatch_insert(rd);

done:
	return ret;
}
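
In the empty-queue branch above, a pending idle timer blocks dispatch unless the dispatch is forced, and an idling-enabled queue arms the timer instead of moving on. A condensed sketch of that decision; the helper name and parameters are illustrative, not part of the ROW scheduler itself.

#include <linux/workqueue.h>

/* returns true if the caller should go on and dispatch from another queue */
static bool row_idle_or_dispatch(struct workqueue_struct *wq,
				 struct delayed_work *idle_work,
				 unsigned long idle_jiffies,
				 bool force, bool begin_idling)
{
	if (delayed_work_pending(idle_work)) {
		if (!force)
			return false;		/* keep idling for more reads */
		cancel_delayed_work(idle_work);	/* forced dispatch: stop idling */
	}

	if (!force && begin_idling) {
		queue_delayed_work(wq, idle_work, idle_jiffies);
		return false;			/* wait for the idle timer */
	}

	return true;
}
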
예제 #22
0
static void smdhsic_disconnect(struct usb_interface *intf)
{
	int devid;
	struct usb_interface *smd_intf;
	struct str_intf_priv *intfpriv;
	struct usb_device *device = NULL;

	pr_info("%s: Called\n", __func__);

	intfpriv = usb_get_intfdata(intf);
	if (!intfpriv) {
		pr_err("%s: intfpriv is NULL\n", __func__);
		goto err_get_intfdata;
	}
	device = get_usb_device(intfpriv);
	devid = GET_DEVID(intfpriv->devid);
	pr_debug("%s : devid : %d\n", __func__, devid);

	smd_intf = get_usb_intf(intfpriv);
	if (!smd_intf) {
		pr_err("smd_intf is NULL\n");
		goto err_get_usb_intf;
	}

	if (smd_intf != intf) {
		pr_err("smd_intf is not same intf\n");
		goto err_mismatched_intf;
	}

	usb_driver_release_interface(get_usb_driver(intf), smd_intf);

	if (device)
		usb_put_dev(device);

	pm_runtime_disable(&device->dev);
	if (g_usbdev.hsic)
		cancel_delayed_work(&g_usbdev.hsic->pm_runtime_work);

	switch (devid) {
	case FMT_DEV_ID:
		flush_txurb(&g_usbdev.ipc_urbq);
		flush_txurb(&g_usbdev.data_urbq);
		smdctl_request_connection_recover(true);
	case RAW_DEV_ID:
	case RFS_DEV_ID:
	case CMD_DEV_ID:
	case DOWN_DEV_ID:
		if (emu_discon_func[devid])
			emu_discon_func[devid](g_usbdev.smd_device[devid]);
		else
			kfree(intfpriv->data);
		break;
	default:
		pr_warn("%s:Undefined Callback Function\n",
		       __func__);
	}
	/*
	 * Prevent sleep during connection recovery: when USB suspend and
	 * the recovery routine overlap, modem reset suffers a huge delay.
	 */
	wake_lock_timeout(&g_usbdev.txwake, 20*HZ);
	kfree(intfpriv);
	usb_set_intfdata(intf, NULL);
	g_usbdev.usbdev = NULL;
	g_usbdev.suspended = 0;
	g_usbdev.hsic = NULL;
	return;

err_mismatched_intf:
err_get_usb_intf:
	if (device)
		usb_put_dev(device);
err_get_intfdata:
	pr_err("release(2) : %p\n", intf);
	usb_driver_release_interface(get_usb_driver(intf), intf);
	return;
}
예제 #23
0
void
nfs4_renewd_prepare_shutdown(struct nfs_server *server)
{
	cancel_delayed_work(&server->nfs_client->cl_renewd);
}
예제 #24
0
void cifs_dfs_release_automount_timer(void)
{
	BUG_ON(!list_empty(&cifs_dfs_automount_list));
	cancel_delayed_work(&cifs_dfs_automount_task);
	flush_scheduled_work();
}
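
Examples #23 and #24 both use the non-blocking cancel_delayed_work(); #24 follows it with flush_scheduled_work() because the callback may already be executing on the system workqueue. A small sketch of the two ways to get the same "nothing runs after this point" guarantee, assuming the work was queued with schedule_delayed_work():

#include <linux/workqueue.h>

static void stop_periodic_job(struct delayed_work *dwork)
{
	/*
	 * cancel_delayed_work() only removes an item that has not started;
	 * a callback that is already running keeps executing, so wait for it.
	 */
	if (!cancel_delayed_work(dwork))
		flush_scheduled_work();

	/* equivalent single call: cancel_delayed_work_sync(dwork); */
}
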
예제 #25
0
int read_psh_data(struct psh_ia_priv *ia_data)
{
#if 0
	struct psh_ext_if *psh_if_info =
			(struct psh_ext_if *)ia_data->platform_priv;
	int cur_read = 0, ret = 0;
	struct frame_head fh;
	struct timespec t1,t2,t3,t4;	

	struct spi_message msg;
	
	struct spi_transfer xfer_fh = {
	
		.rx_buf		= (void *)&fh,
		.len		= sizeof(fh)
	
	};

	struct spi_transfer xfer_payload = {
		.rx_buf = (void *)&psh_if_info->psh_frame
	};
	
    int gpio_val = -1;
	int sequent_dummy = 0;
	static int loop = 0;

#ifdef ENABLE_RPM
	/* We may need to zero all the buffer */
	pm_runtime_get_sync(&psh_if_info->pshc->dev);
#endif 
	psh_if_info->gpio_psh_int = GPIO_PSH_INT;

	gpio_val = gpio_get_value(psh_if_info->gpio_psh_int);

	dev_dbg(&psh_if_info->pshc->dev, "%s, gpio_val=%d\n", __func__, gpio_val);
	
	/* Loop read till error or no more data */
	while (!gpio_get_value(psh_if_info->gpio_psh_int)) {
		
		char *ptr;
		int len;

		if (ia_data->cmd_in_progress == CMD_RESET)
			break;
		else if (ia_data->cmd_in_progress != CMD_INVALID)
			schedule();

		if (sequent_dummy >= 2) {
			/* something wrong, check FW */
			dev_dbg(&psh_if_info->pshc->dev,
				"2 sequent dummy frame header read!");
			break;
		}

		spi_message_init(&msg);
		spi_message_add_tail(&xfer_fh, &msg);

		ktime_get_ts(&t1);
		ret = spi_sync(psh_if_info->pshc, &msg);
		ktime_get_ts(&t3);
		
		if (ret) {
			dev_err(&psh_if_info->pshc->dev, "Read frame header error!"
					" ret=%d\n", ret);
			loop++;
			break;
		}

		dev_dbg(&psh_if_info->pshc->dev, "sign=0x%x(0x4853), len=%d\n", fh.sign, fh.length);

		if (fh.sign == LBUF_CELL_SIGN) {
			if (fh.length > LBUF_MAX_CELL_SIZE) {
				dev_err(&psh_if_info->pshc->dev, "frame size is too big!\n");
				ret = -EPERM;
				break;
			}
			sequent_dummy = 0;
		} else {
			if (fh.sign || fh.length) {
				dev_err(&psh_if_info->pshc->dev, "wrong fh (0x%x, 0x%x)\n",
						fh.sign, fh.length);
				ret = -EPERM;
				break;
			}
			sequent_dummy++;
			continue;
		}

		//len = frame_size(fh.length) - sizeof(fh);
		len = fh.length;
		xfer_payload.len = len;

		dev_dbg(&psh_if_info->pshc->dev, "%s xfer_payload.len=%d\n", __func__, len);
	
		
		spi_message_init(&msg);
		spi_message_add_tail(&xfer_payload, &msg);
		ret = spi_sync(psh_if_info->pshc, &msg);
		
		if (ret) {
			dev_err(&psh_if_info->pshc->dev, "Read main frame error!"
				   " ret=%d\n", ret);
			break;
		}

		ptr = psh_if_info->psh_frame;
		
		ktime_get_ts(&t4);
		//dump_cmd_resp(ptr, len);
		
		while (len > 0) {
			struct cmd_resp *resp = (struct cmd_resp *)ptr;
			u32 size = sizeof(*resp) + resp->data_len;

			ret = ia_handle_frame(ia_data, ptr, size);
			if (ret > 0) {
				cur_read += ret;

				if (cur_read > 250) {
					cur_read = 0;
					sysfs_notify(&psh_if_info->pshc->dev.kobj,
						NULL, "data_size");
				}
			}
			//ptr += frame_size(size);
			//len -= frame_size(size);
			ptr += size;
			len -= size;
		}
	}

#ifdef ENABLE_RPM
	pm_runtime_mark_last_busy(&psh_if_info->pshc->dev);
	pm_runtime_put_autosuspend(&psh_if_info->pshc->dev);
#endif 

	if (cur_read){
		sysfs_notify(&psh_if_info->pshc->dev.kobj, NULL, "data_size");
	    
		ktime_get_ts(&t2);
	
		s64 elapsedTime_t12 = timespec_to_ns(&t2) - timespec_to_ns(&t1);
		s64 elapsedTime_t13 = timespec_to_ns(&t3) - timespec_to_ns(&t1);
		s64 elapsedTime_t34 = timespec_to_ns(&t4) - timespec_to_ns(&t3);
		s64 elapsedTime_t42 = timespec_to_ns(&t2) - timespec_to_ns(&t4);

		dev_dbg(&psh_if_info->pshc->dev, "elapsedTime_t12 = %lld ns, t13 = %lld ns, t34 = %lld ns, t42 = %lld ns\n", 
												elapsedTime_t12,
												elapsedTime_t13,
												elapsedTime_t34,
												elapsedTime_t42);

	}
	if (loop > 8) {
		queue_work(psh_if_info->wq, &psh_if_info->work);
		loop = 0;
    }

	return ret;
#else
    return 0;
#endif
}
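
The (compiled-out) loop above reads a fixed-size frame header over SPI, validates its signature and length, and only then reads the payload. A condensed sketch of that two-step read, reusing this file's frame_head type and LBUF_CELL_SIGN constant and simplifying each transfer to spi_read(); the helper name and the use of spi_read() are assumptions.

static int psh_read_one_frame(struct spi_device *spi, struct frame_head *fh,
			      void *payload, size_t max_payload)
{
	int ret;

	ret = spi_read(spi, fh, sizeof(*fh));		/* step 1: header */
	if (ret)
		return ret;

	if (fh->sign != LBUF_CELL_SIGN || fh->length > max_payload)
		return -EPERM;				/* dummy or oversized frame */

	return spi_read(spi, payload, fh->length);	/* step 2: payload */
}
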
 
#define CMD_START_STREAMING (3)
#define CMD_STOP_STREAMING  (4)


void dump_tx_buf(struct ia_cmd *cmd, int len)
{
	struct sensor_cfg_param *stream_cfg;

	printk(KERN_DEBUG "%s, tran_id=%d, cmd_id=%d, sensor_id=%d\n", __func__, cmd->tran_id, cmd->cmd_id, cmd->sensor_id);

	if(cmd->cmd_id == CMD_START_STREAMING){
		stream_cfg = (struct sensor_cfg_param *)cmd->param;
		printk(KERN_DEBUG"sample_freq=%d, buffer_delay=%d\n", stream_cfg->sample_freq, stream_cfg->buff_delay);		
	}
		
}

#define HOST2PSH_PACKET_LEN (16)
#if 0
int process_send_cmd(struct psh_ia_priv *ia_data,
			int ch, struct ia_cmd *cmd, int len)
{
	struct psh_ext_if *psh_if_info =
			(struct psh_ext_if *)ia_data->platform_priv;
	int ret = 0;
	int i = 0;
	char cmd_buf[HOST2PSH_PACKET_LEN];	

	// fix host2psh package len to 16 
	len = HOST2PSH_PACKET_LEN;

	memset(cmd_buf, '\0', HOST2PSH_PACKET_LEN);
	memcpy(cmd_buf, (char *)cmd, len);
	
	struct spi_message msg;
	struct spi_transfer xfer = {
		.len = len, 
		.tx_buf = (void *)cmd_buf
	};

	for(i=0; i<len; i++)
		dev_dbg(&psh_if_info->pshc->dev," %d ", cmd_buf[i]);
	
	dev_dbg(&psh_if_info->pshc->dev,"\n");

	pm_runtime_get_sync(&psh_if_info->pshc->dev);

/*
		The host needs to reset the FW on each boot by sending CMD_RESET.
		Once the FW reboots, the host enables the interrupt and waits for data from the sensor hub.
*/
/*
 process_send_cmd+0x11f/0x1d0
 ia_send_cmd+0x7f/0x140
 ia_start_control+0xe5/0x1a0
 dev_attr_store+0x18/0x30
 sysfs_write_file+0xe7/0x160
 vfs_write+0xbe/0x1e0
 SyS_write+0x4d/0xa0
 ia32_do_call+0x13/0x13
*/

	if (ch == 0 && cmd->cmd_id == CMD_RESET) {
		if (psh_if_info->irq_disabled == 0) {
			disable_irq(psh_if_info->pshc->irq);
			psh_if_info->irq_disabled = 1;
			dev_info(&psh_if_info->pshc->dev, "%s disable irq %d\n", psh_if_info->pshc->irq);
		}

		/* first send soft reset to disable sensors running,
			or sensor I2C bus may hang */
		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);

		ret = spi_sync(psh_if_info->pshc, &msg);
		
		msleep(200);

		gpio_set_value(psh_if_info->gpio_psh_rst, 0);
		usleep_range(10000, 10000);
		gpio_set_value(psh_if_info->gpio_psh_rst, 1);

		/* wait for pshfw to run */
		msleep(1000);

		if (psh_if_info->irq_disabled == 1) {
			dev_info(&psh_if_info->pshc->dev, "%s enable irq %d\n", psh_if_info->pshc->irq);
			enable_irq(psh_if_info->pshc->irq);
			psh_if_info->irq_disabled = 0;
		}
	} else if (ch == 0 && cmd->cmd_id == CMD_FW_UPDATE) {
		if (psh_if_info->irq_disabled == 0) {
			dev_info(&psh_if_info->pshc->dev, "%s disable irq %d\n", psh_if_info->pshc->irq);
			disable_irq(psh_if_info->pshc->irq);
			psh_if_info->irq_disabled = 1;
		}

		msleep(1000);

		ret = 0;

		goto exit;
	} else if (ch == 0 && psh_if_info->irq_disabled == 1) {
		/* prevent sending command during firmware updating,
		 * or update will fail.
		 */
		ret = -EPERM;

		goto exit;
	}

		while (!gpio_get_value(psh_if_info->gpio_psh_int))
			msleep(1);

		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);
		ret = spi_sync(psh_if_info->pshc, &msg);

		dump_tx_buf(cmd, len);
	
		if (ret) {
			dev_err(&psh_if_info->pshc->dev, "sendcmd through spi fail!\n");
			ret = -EIO;
		} else {
			ret = 0;
		}

#ifdef DRV_POLLING_MODE	
	if(cmd->cmd_id == CMD_START_STREAMING)
	{
		dev_err(&psh_if_info->pshc->dev, "%s start_stream\n", __func__);
		queue_delayed_work(psh_if_info->wq, &psh_if_info->dwork, POLLING_HZ);
	}
	else if (cmd->cmd_id == CMD_STOP_STREAMING)
	{
		dev_err(&psh_if_info->pshc->dev, "%s stop_stream\n", __func__);
		cancel_delayed_work(&psh_if_info->dwork);
	}
#endif 	

	pm_runtime_mark_last_busy(&psh_if_info->pshc->dev);

exit:
	pm_runtime_put_autosuspend(&psh_if_info->pshc->dev);

	return ret;
}
#else

static void build_transfer_buffer(void* lp_new_package_buffer, 
                                  void* lp_buffer, int buffer_size)
{
    struct frame_head* lp_fh = 
        (struct frame_head*)lp_new_package_buffer;
    
    INIT_FRAME_HEAD(lp_fh, buffer_size);
    
    memcpy(lp_fh + 1, lp_buffer, buffer_size);
    
    return;
}

/* buffer_size is only the frame header's payload size; it does not include the frame header itself */
struct send_list_entry*  build_send_list_entry(void* lp_buffer, int buffer_size)
{
    struct send_list_entry* lp_new_entry = NULL;
    int total_size = buffer_size + SIZE_OF_FRAME_HEAD;

    if (!lp_buffer || total_size  > MAX_SEND_DATA_SIZE)
    {
        return NULL;
    }

    lp_new_entry = kzalloc(sizeof(struct send_list_entry), GFP_KERNEL);
    if (lp_new_entry)
    {
        build_transfer_buffer(lp_new_entry->data,
                              lp_buffer,
                              buffer_size);

        lp_new_entry->used_size = total_size;
        lp_new_entry->debug_index = 0;
    }

    return lp_new_entry;
}


void insert_send_data_entry_to_list(struct psh_ext_if* lp_psh_if_info,
                                    struct send_list_entry* lp_new_entry)
{
    mutex_lock(&lp_psh_if_info->send_data_list.lock);

    //add new to tail of the list
    list_add_tail(&lp_new_entry->list_entry, &lp_psh_if_info->send_data_list.head);

    mutex_unlock(&lp_psh_if_info->send_data_list.lock);
}

struct send_list_entry* remove_send_data_entry_from_list(struct psh_ext_if* lp_psh_if_info)
{
    struct send_list_entry* lp_removed_entry;

    mutex_lock(&lp_psh_if_info->send_data_list.lock);

    lp_removed_entry = NULL;
    if (!list_empty(&lp_psh_if_info->send_data_list.head))
    {
        lp_removed_entry = list_entry(lp_psh_if_info->send_data_list.head.next,
                                      struct send_list_entry,
                                      list_entry);
        
        list_del(lp_psh_if_info->send_data_list.head.next);
    }
    
    mutex_unlock(&lp_psh_if_info->send_data_list.lock);

    return lp_removed_entry;
}

/* len is the actual command size, including parameter sizes */
int process_send_cmd(struct psh_ia_priv *ia_data,
			int ch, struct ia_cmd *cmd, int len)
{
	struct psh_ext_if *psh_if_info =
			(struct psh_ext_if *)ia_data->platform_priv;
	int ret = 0;
	int i = 0;

    struct send_list_entry* lp_new_entry = NULL;

    /*yy: remove this limitation*/
	// fix host2psh package len to 16 
	//len = HOST2PSH_PACKET_LEN;
    //len += (sizeof(struct ia_cmd) - CMD_PARAM_MAX_SIZE);
    //	memset(cmd_buf, '\0', HOST2PSH_PACKET_LEN);
	// memcpy(cmd_buf, (char *)cmd, len);    
    lp_new_entry = build_send_list_entry(cmd, len);
    if (!lp_new_entry)
    {
        dev_err(&psh_if_info->pshc->dev," drop send data becuause no enough memory.\n");
        return -1;
    }

	for (i = 0; i < len; i++)
		dev_dbg(&psh_if_info->pshc->dev, " %d ", ((char *)cmd)[i]);
	
	dev_dbg(&psh_if_info->pshc->dev,"\n");

	pm_runtime_get_sync(&psh_if_info->pshc->dev);

/*
		The host needs to reset the FW on each boot by sending CMD_RESET.
		Once the FW reboots, the host enables the interrupt and waits for data from the sensor hub.
*/
/*
 process_send_cmd+0x11f/0x1d0
 ia_send_cmd+0x7f/0x140
 ia_start_control+0xe5/0x1a0
 dev_attr_store+0x18/0x30
 sysfs_write_file+0xe7/0x160
 vfs_write+0xbe/0x1e0
 SyS_write+0x4d/0xa0
 ia32_do_call+0x13/0x13
*/

    /*
     * Put the send-data entry on the send list; the delayed worker
     * is expected to pick it up and transfer it.
     */
    insert_send_data_entry_to_list(psh_if_info, lp_new_entry);
    /*         
#ifdef DRV_POLLING_MODE	
	if(cmd->cmd_id == CMD_START_STREAMING)
	{
		dev_err(&psh_if_info->pshc->dev, "%s start_stream\n", __func__);
        psh_if_info->task_flag = TASK_FLAG_REQUEST_LOOP;
		queue_delayed_work(psh_if_info->wq, &psh_if_info->dwork, POLLING_HZ);
	}
	else if (cmd->cmd_id == CMD_STOP_STREAMING)
	{
		dev_err(&psh_if_info->pshc->dev, "%s stop_stream\n", __func__);
		cancel_delayed_work(&psh_if_info->dwork);
	}
#endif 	
    */

	pm_runtime_mark_last_busy(&psh_if_info->pshc->dev);
	pm_runtime_put_autosuspend(&psh_if_info->pshc->dev);

	return ret;
}
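
process_send_cmd() above only enqueues the entry; per its comment, a delayed worker is expected to drain the list. A hypothetical consumer, reusing this file's remove_send_data_entry_from_list() and do_transfer() helpers; the worker name, the dwork binding, and the assumption that a negative do_transfer() return means failure are mine, not the driver's.

static void psh_send_list_worker(struct work_struct *work)
{
	struct psh_ext_if *lp_psh_if_info =
		container_of(to_delayed_work(work), struct psh_ext_if, dwork);
	struct send_list_entry *lp_entry;

	/* drain the send list one entry at a time */
	while ((lp_entry = remove_send_data_entry_from_list(lp_psh_if_info))) {
		if (do_transfer(lp_psh_if_info, lp_entry->data,
				lp_entry->used_size, NULL) < 0)
			dev_err(&lp_psh_if_info->pshc->dev,
				"deferred send failed\n");
		kfree(lp_entry);
	}
}
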

/*
 * len is the actual command size, including parameter sizes.
 * Make sure the polling thread is already paused before calling this API!
 */
int process_send_cmd_sync(struct psh_ia_priv *ia_data,
                          int ch, struct ia_cmd *cmd, int len)
{
	int ret = -1;
    
	struct psh_ext_if *psh_if_info =
        (struct psh_ext_if *)ia_data->platform_priv;
    
    struct send_list_entry* lp_new_entry = NULL;

    //len += (sizeof(struct ia_cmd) - CMD_PARAM_MAX_SIZE);
    
	pm_runtime_get_sync(&psh_if_info->pshc->dev);

    lp_new_entry = build_send_list_entry(cmd, len);
    if (lp_new_entry)
    {
        ret = do_transfer(psh_if_info,
                          lp_new_entry->data,
                          lp_new_entry->used_size,
                          NULL);

        kfree(lp_new_entry);
    }
    
	pm_runtime_mark_last_busy(&psh_if_info->pshc->dev);
	pm_runtime_put_autosuspend(&psh_if_info->pshc->dev);
    
	return ret;
}
예제 #26
0
static int modem_startup(struct usb_serial *serial)
{
	struct usb_serial_port *port = serial->port[0];
	struct modem_port *modem_port_ptr = NULL;
	struct usb_interface *interface;
	struct usb_endpoint_descriptor *endpoint;
	struct usb_endpoint_descriptor *epread = NULL;
	struct usb_endpoint_descriptor *epwrite = NULL;
	struct usb_host_interface *iface_desc;
	unsigned long flags;
	int readsize;
	int num_rx_buf;
	int i;
	int retval = 0;

	interface = serial->interface;
	iface_desc = interface->cur_altsetting;

	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_bulk_in(endpoint))
			epread = endpoint;
		if (usb_endpoint_is_bulk_out(endpoint))
			epwrite = endpoint;
	}

	if (epread == NULL) {
		dev_err(&serial->dev->dev,
			 "%s: No Bulk In Endpoint for this Interface\n",
			 __func__);
		return -EPERM;
	}
	if (epwrite == NULL) {
		dev_err(&serial->dev->dev,
			 "%s: No Bulk Out Endpoint for this Interface\n",
			 __func__);
		return -EPERM;
	}

	num_rx_buf = AP_NR;
	readsize = le16_to_cpu(epread->wMaxPacketSize) * 2;

	/* setup a buffer to store interface data */
	modem_port_ptr =
	    kzalloc(sizeof(struct modem_port), GFP_KERNEL);
	if (modem_port_ptr == NULL) {
		dev_err(&serial->dev->dev,
			 "%s: error -- no memory on start up.\n",
			 __func__);
		return -ENOMEM;
	}

	/* init tasklet for rx processing */
	tasklet_init(&modem_port_ptr->urb_task, modem_rx_tasklet,
		     (unsigned long)modem_port_ptr);
	modem_port_ptr->rx_buflimit = num_rx_buf;
	modem_port_ptr->rx_endpoint =
		usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress);
	spin_lock_init(&modem_port_ptr->read_lock);
	spin_lock_init(&modem_port_ptr->write_lock);
	spin_lock_init(&modem_port_ptr->last_traffic_lock);

	atomic_set(&modem_port_ptr->wakeup_flag, 0);
	modem_port_ptr->serial = serial;
	modem_port_ptr->susp_count = 0;
	modem_port_ptr->resuming = 0;
	modem_port_ptr->port = 0;
	modem_port_ptr->last_traffic = 0;
	modem_port_ptr->readsize = readsize;
	modem_port_ptr->writesize = le16_to_cpu(epwrite->wMaxPacketSize) * 20;
	modem_port_ptr->number = modem_attached_ports++;

	INIT_WORK(&modem_port_ptr->wake_and_write, modem_wake_and_write);
	INIT_WORK(&modem_port_ptr->usb_wkup_work, modem_usb_wkup_work);

	if (modem_write_buffers_alloc(modem_port_ptr, serial) < 0) {
		dev_err(&serial->dev->dev,
			"%s: out of memory\n", __func__);
		goto alloc_write_buf_fail;
	}

	/* allocate multiple receive urb pool */
	for (i = 0; i < num_rx_buf; i++) {
		struct ap_ru *rcv = &(modem_port_ptr->ru[i]);

		rcv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (rcv->urb == NULL) {
			dev_err(&serial->dev->dev,
				"%s: out of memory\n", __func__);
			goto alloc_rb_urb_fail;
		}

		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		rcv->instance = modem_port_ptr;
	}

	/* allocate multiple receive buffer */
	for (i = 0; i < num_rx_buf; i++) {
		struct ap_rb *rb = &(modem_port_ptr->rb[i]);

		rb->base = usb_buffer_alloc(serial->dev, readsize,
					GFP_KERNEL, &rb->dma);
		if (!rb->base) {
			dev_err(&serial->dev->dev,
				 "%s : out of memory\n",
				__func__);
			goto alloc_rb_buffer_fail;
		}
	}
	for (i = 0; i < AP_NW; i++) {
		struct ap_wb *snd = &(modem_port_ptr->wb[i]);

		snd->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!snd->urb) {
			dev_err(&serial->dev->dev, "%s : out of memory "
				"(write urbs usb_alloc_urb)\n", __func__);
			goto alloc_wb_urb_fail;
		}
		usb_fill_bulk_urb(snd->urb, serial->dev,
				usb_sndbulkpipe(serial->dev,
					epwrite->bEndpointAddress),
				NULL, modem_port_ptr->writesize,
				modem_write_bulk_callback, snd);
		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		snd->instance = modem_port_ptr;
	}

	modem_port_ptr->modem_status = 0;

	/* The modem has presented a usb interface, remove the shutdown timer. */
	spin_lock_irqsave(&modem.lock, flags);
	if (modem.connected == false) {
		modem.connected = true;
		cancel_delayed_work(&modem.delayed_pwr_shutdown);
	}
	spin_unlock_irqrestore(&modem.lock, flags);

	/* install serial private data */
	usb_set_serial_data(serial, modem_port_ptr);

	if (modem_port_ptr->number == MODEM_INTERFACE_NUM) {
		modem.wq = create_singlethread_workqueue("mdm6600_usb_wq");
		wake_lock_init(&modem.wakelock, WAKE_LOCK_SUSPEND,
				"mdm6600_usb_modem");
		/* install the BP GPIO wakeup irq and disable it first */
		if (modem_wake_irq) {
			retval = request_irq(
				modem_wake_irq,
				gpio_wkup_interrupt_handler,
				IRQ_DISABLED | IRQ_TYPE_EDGE_FALLING,
				"mdm6600_usb_wakeup", modem_port_ptr);

			if (retval)
				dev_err(&interface->dev,
					"%s request_irq failed \n", __func__);
			else
				disable_irq(modem_wake_irq);
		}
	}

	return 0;

alloc_wb_urb_fail:
	for (i = 0; i < AP_NW; i++)
		usb_free_urb(modem_port_ptr->wb[i].urb);
alloc_rb_buffer_fail:
	modem_read_buffers_free(modem_port_ptr, serial);
alloc_rb_urb_fail:
	for (i = 0; i < num_rx_buf; i++)
		usb_free_urb(modem_port_ptr->ru[i].urb);
alloc_write_buf_fail:
	modem_write_buffers_free(modem_port_ptr, serial);
	if (modem_port_ptr != NULL) {
		kfree(modem_port_ptr);
		usb_set_serial_data(serial, NULL);
	}
	modem_attached_ports--;
	return -ENOMEM;
}
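
modem_startup() above cancels the delayed power-shutdown work under modem.lock once the modem presents a USB interface. A sketch of the counterpart that would arm it on disconnect, so power is cut only if the modem fails to re-enumerate in time; the helper name and MODEM_PWR_SHUTDOWN_DELAY value are assumptions, not code from this driver.

#define MODEM_PWR_SHUTDOWN_DELAY	(10 * HZ)	/* assumed timeout */

static void modem_arm_pwr_shutdown(void)
{
	unsigned long flags;

	spin_lock_irqsave(&modem.lock, flags);
	if (modem.connected) {
		modem.connected = false;
		schedule_delayed_work(&modem.delayed_pwr_shutdown,
				      MODEM_PWR_SHUTDOWN_DELAY);
	}
	spin_unlock_irqrestore(&modem.lock, flags);
}
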
예제 #27
0
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rx_packet *res;
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_ABORT_CMD,
		.meta.flags = CMD_WANT_SKB,
	};

	/* If there isn't a scan actively going on in the hardware
	 * then we are in between scan bands and not actually
	 * actively scanning, so don't send the abort command */
	if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return 0;
	}

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret) {
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		return ret;
	}

	res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
	if (res->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
		clear_bit(STATUS_SCAN_HW, &priv->status);
	}

	priv->alloc_rxb_skb--;
	dev_kfree_skb_any(cmd.meta.u.skb);

	return ret;
}


/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_rx_reply_scan(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
#endif
}

/* Service SCAN_START_NOTIFICATION (0x82) */
static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_scanstart_notification *notif =
	    (struct iwl_scanstart_notification *)pkt->u.raw;
	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	IWL_DEBUG_SCAN("Scan start: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       notif->status, notif->beacon_timer);
}

/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	IWL_DEBUG_SCAN("Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec (%dms since last)\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
		       jiffies_to_msecs(elapsed_jiffies
					(priv->last_scan_jiffies, jiffies)));
#endif

	priv->last_scan_jiffies = jiffies;
	priv->next_scan_jiffies = 0;
}

/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;

	IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);
#endif

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	/* The scan completion notification came in, so kill that timer... */
	cancel_delayed_work(&priv->scan_check);

	IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
		       (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
						"2.4" : "5.2",
		       jiffies_to_msecs(elapsed_jiffies
					(priv->scan_pass_start, jiffies)));

	/* Remove this scanned band from the list of pending
	 * bands to scan; band G precedes A in scanning order,
	 * as seen in iwl_bg_request_scan */
	if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
		priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
	else if (priv->scan_bands &  BIT(IEEE80211_BAND_5GHZ))
		priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);

	/* If a request to abort was given, or the scan did not succeed
	 * then we reset the scan state machine and terminate,
	 * re-queuing another scan if one has been requested */
	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_INFO("Aborted scan completed.\n");
		clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	} else {
		/* If there are more bands on this scan pass reschedule */
		if (priv->scan_bands)
			goto reschedule;
	}

	priv->last_scan_jiffies = jiffies;
	priv->next_scan_jiffies = 0;
	IWL_DEBUG_INFO("Setting scan to off\n");

	clear_bit(STATUS_SCANNING, &priv->status);

	IWL_DEBUG_INFO("Scan took %dms\n",
		jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));

	queue_work(priv->workqueue, &priv->scan_completed);

	return;

reschedule:
	priv->scan_pass_start = jiffies;
	queue_work(priv->workqueue, &priv->request_scan);
}

void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
{
	/* scan handlers */
	priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
	priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
	priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
					iwl_rx_scan_results_notif;
	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
					iwl_rx_scan_complete_notif;
}
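
iwl_rx_scan_complete_notif() above cancels priv->scan_check, a watchdog that only fires if the firmware never reports scan completion. A sketch of how such a watchdog is typically armed when the scan command is issued; the helper and the 7-second timeout here are assumptions, not code from this driver.

#define IWL_SCAN_CHECK_WATCHDOG	(7 * HZ)	/* assumed timeout */

static void iwl_arm_scan_watchdog(struct iwl_priv *priv)
{
	/* restart the watchdog for the scan that was just submitted */
	cancel_delayed_work(&priv->scan_check);
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);
}
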
예제 #28
0
void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;
	enum ieee80211_band band;
	u32 rate, mask;

	switch (wl->scan.state) {
	case WL1271_SCAN_STATE_IDLE:
		break;

	case WL1271_SCAN_STATE_2GHZ_ACTIVE:
		band = IEEE80211_BAND_2GHZ;
		mask = wlvif->bitrate_masks[band];
		if (wl->scan.req->no_cck) {
			mask &= ~CONF_TX_CCK_RATES;
			if (!mask)
				mask = CONF_TX_RATE_MASK_BASIC_P2P;
		}
		rate = wl1271_tx_min_rate_get(wl, mask);
		ret = wl1271_scan_send(wl, vif, band, false, rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
			wl1271_scan_stm(wl, vif);
		}

		break;

	case WL1271_SCAN_STATE_2GHZ_PASSIVE:
		band = IEEE80211_BAND_2GHZ;
		mask = wlvif->bitrate_masks[band];
		if (wl->scan.req->no_cck) {
			mask &= ~CONF_TX_CCK_RATES;
			if (!mask)
				mask = CONF_TX_RATE_MASK_BASIC_P2P;
		}
		rate = wl1271_tx_min_rate_get(wl, mask);
		ret = wl1271_scan_send(wl, vif, band, true, rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			if (wl->enable_11a)
				wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
			else
				wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl, vif);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_ACTIVE:
		band = IEEE80211_BAND_5GHZ;
		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
		ret = wl1271_scan_send(wl, vif, band, false, rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
			wl1271_scan_stm(wl, vif);
		}

		break;

	case WL1271_SCAN_STATE_5GHZ_PASSIVE:
		band = IEEE80211_BAND_5GHZ;
		rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
		ret = wl1271_scan_send(wl, vif, band, true, rate);
		if (ret == WL1271_NOTHING_TO_SCAN) {
			wl->scan.state = WL1271_SCAN_STATE_DONE;
			wl1271_scan_stm(wl, vif);
		}

		break;

	case WL1271_SCAN_STATE_DONE:
		wl->scan.failed = false;
		cancel_delayed_work(&wl->scan_complete_work);
		ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
					     msecs_to_jiffies(0));
		break;

	default:
		wl1271_error("invalid scan state");
		break;
	}

	if (ret < 0) {
		cancel_delayed_work(&wl->scan_complete_work);
		ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
					     msecs_to_jiffies(0));
	}
}
static void stop_rq_work(void)
{
	if (rq_data->nr_run_wq)
		cancel_delayed_work(&rq_data->work);
	return;
}
예제 #30
0
static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct resource *res;
	unsigned char bcd_tmp, bcd_loop;
	int ret;
#ifdef CONFIG_RTC_DRV_MAX8998
	struct rtc_time tm;
#endif

	pr_debug("%s: probe=%p\n", __func__, pdev);

	/* find the IRQs */

	s3c_rtc_tickno = platform_get_irq(pdev, 1);
	if (s3c_rtc_tickno < 0) {
		dev_err(&pdev->dev, "no irq for rtc tick\n");
		return -ENOENT;
	}

	s3c_rtc_alarmno = platform_get_irq(pdev, 0);
	if (s3c_rtc_alarmno < 0) {
		dev_err(&pdev->dev, "no irq for alarm\n");
		return -ENOENT;
	}

	pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n",
		 s3c_rtc_tickno, s3c_rtc_alarmno);

	/* get the memory region */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		return -ENOENT;
	}

	s3c_rtc_mem = request_mem_region(res->start,
					 res->end-res->start+1,
					 pdev->name);

	if (s3c_rtc_mem == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -ENOENT;
		goto err_nores;
	}

	s3c_rtc_base = ioremap(res->start, res->end - res->start + 1);
	if (s3c_rtc_base == NULL) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -EINVAL;
		goto err_nomap;
	}

	/* check to see if everything is setup correctly */

	s3c_rtc_enable(pdev, 1);

 	pr_debug("s3c2410_rtc: RTCCON=%02x\n",
		 readb(s3c_rtc_base + S3C2410_RTCCON));

	s3c_rtc_setfreq(&pdev->dev, 1);

	device_init_wakeup(&pdev->dev, 1);

#ifdef CONFIG_RTC_DRV_MAX8998
	max8998_rtc_read_time(&tm);
#endif

	/* register RTC and exit */

	rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops,
				  THIS_MODULE);

	if (IS_ERR(rtc)) {
		dev_err(&pdev->dev, "cannot attach rtc\n");
		ret = PTR_ERR(rtc);
		goto err_nortc;
	}

	rtc->max_user_freq = S3C_MAX_CNT;

#ifdef CONFIG_RTC_DRV_MAX8998
	s3c_rtc_settime(rtc, &tm);  //update from pmic
#endif

#ifdef SET_RTC_DEFAULT_RESET_TIME
	{
		struct rtc_time tm;

		s3c_rtc_gettime (pdev, &tm);
		if (rtc_valid_tm (&tm) != 0)
		{
			struct rtc_time reset_tm = {
				.tm_sec = DEFAULT_RESET_TIME_SEC,
				.tm_min = DEFAULT_RESET_TIME_MIN,
				.tm_hour = DEFAULT_RESET_TIME_HOUR,
				.tm_mday = DEFAULT_RESET_TIME_DATE,
				.tm_mon = DEFAULT_RESET_TIME_MON - 1,
				.tm_year = DEFAULT_RESET_TIME_YEAR - 1900,
				};

			s3c_rtc_settime (pdev, &reset_tm);
			#ifdef CONFIG_RTC_DRV_MAX8998
			max8998_rtc_set_time(&reset_tm); // also update pmic rtc as default 
			#endif
		}
	}
#else

	/* check rtc time */
	for (bcd_loop = S3C2410_RTCSEC ; bcd_loop <= S3C2410_RTCYEAR ; bcd_loop +=0x4)
	{
		bcd_tmp = readb(s3c_rtc_base + bcd_loop);
		if(((bcd_tmp & 0xf) > 0x9) || ((bcd_tmp & 0xf0) > 0x90))
			writeb(0, s3c_rtc_base + bcd_loop);
	}
#endif /* SET_RTC_DEFAULT_RESET_TIME */

	platform_set_drvdata(pdev, rtc);

#ifdef CONFIG_RTC_S3C_SYNC_SYSTEM_TIME
	rtc_sync_start_save_delta();
#endif	/* CONFIG_RTC_S3C_SYNC_SYSTEM_TIME */

	return 0;

 err_nortc:
	s3c_rtc_enable(pdev, 0);
	iounmap(s3c_rtc_base);

 err_nomap:
	release_resource(s3c_rtc_mem);

 err_nores:
	return ret;
}

#ifdef CONFIG_PM

/* RTC Power management control */

static struct timespec s3c_rtc_delta;
static int ticnt_save;

static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct rtc_time tm;
	struct timespec time;

	time.tv_nsec = 0;
	/* save TICNT for anyone using periodic interrupts */
	ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);

	s3c_rtc_gettime(&pdev->dev, &tm);
	rtc_tm_to_time(&tm, &time.tv_sec);
	save_time_delta(&s3c_rtc_delta, &time);

	if (gpio_get_value(GPIO_WLAN_BT_EN) == 0) /* BCM4329 isn't working */
		s3c_rtc_enable(pdev, 0);
	
#ifdef CONFIG_RTC_S3C_SYNC_SYSTEM_TIME
	cancel_delayed_work(&rtc_sync_work);
#endif	/* CONFIG_RTC_S3C_SYNC_SYSTEM_TIME */

	return 0;
}

static int s3c_rtc_resume(struct platform_device *pdev)
{
	struct rtc_time tm;
	struct timespec time;

	time.tv_nsec = 0;

	if (gpio_get_value(GPIO_WLAN_BT_EN) == 0) /* BCM4329 isn't working */
		s3c_rtc_enable(pdev, 1);

	s3c_rtc_gettime(&pdev->dev, &tm);
	rtc_tm_to_time(&tm, &time.tv_sec);
	restore_time_delta(&s3c_rtc_delta, &time);
	writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);

#ifdef CONFIG_RTC_S3C_SYNC_SYSTEM_TIME
	rtc_sync_start ();
#endif

	return 0;
}
#else
#define s3c_rtc_suspend NULL
#define s3c_rtc_resume  NULL
#endif

static struct platform_driver s3c2410_rtc_driver = {
	.probe		= s3c_rtc_probe,
	.remove		= __devexit_p(s3c_rtc_remove),
	.suspend	= s3c_rtc_suspend,
	.resume		= s3c_rtc_resume,
	.driver		= {
		.name	= "s3c2410-rtc",
		.owner	= THIS_MODULE,
	},
};

static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";

static int __init s3c_rtc_init(void)
{
	printk(banner);
	return platform_driver_register(&s3c2410_rtc_driver);
}