static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
{
	unsigned int update_time_intrvl;
	unsigned int chrg_val;
	u32 ccval;
	u8 r8;
	struct battery_property batt_prop;
	int batt_present = 0;
	int usb_present = 0;
	int batt_exception = 0;

	/* make sure the last batt_status read happened delay_time before */
	if (pbi->update_time && time_before(jiffies, pbi->update_time +
						msecs_to_jiffies(delay_time)))
		return;

	update_time_intrvl = jiffies_to_msecs(jiffies -	pbi->update_time);
	pbi->update_time = jiffies;

	/* read coulomb counter registers and schrgint register */
	if (pmic_scu_ipc_battery_cc_read(&ccval)) {
		dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
								__func__);
		return;
	}

	if (intel_scu_ipc_ioread8(PMIC_BATT_CHR_SCHRGINT_ADDR, &r8)) {
		dev_warn(pbi->dev, "%s(): ipc pmic read failed\n",
								__func__);
		return;
	}

	/*
	 * set pmic_power_module_info members based on pmic register values
	 * read.
	 */

	/* set batt_is_present */
	if (r8 & PMIC_BATT_CHR_SBATDET_MASK) {
		pbi->batt_is_present = PMIC_BATT_PRESENT;
		batt_present = 1;
	} else {
		pbi->batt_is_present = PMIC_BATT_NOT_PRESENT;
		pbi->batt_health = POWER_SUPPLY_HEALTH_UNKNOWN;
		pbi->batt_status = POWER_SUPPLY_STATUS_UNKNOWN;
	}

	/* set batt_health */
	if (batt_present) {
		if (r8 & PMIC_BATT_CHR_SBATOVP_MASK) {
			pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
			pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
			pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
			batt_exception = 1;
		} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
			pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
			pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
			pmic_battery_log_event(BATT_EVENT_TEMP_EXCPT);
			batt_exception = 1;
		} else {
			pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
			if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
				/* PMIC will change charging current automatically */
				pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
			}
		}
	}

	/* set usb_is_present */
	if (r8 & PMIC_BATT_CHR_SUSBDET_MASK) {
		pbi->usb_is_present = PMIC_USB_PRESENT;
		usb_present = 1;
	} else {
		pbi->usb_is_present = PMIC_USB_NOT_PRESENT;
		pbi->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
	}

	if (usb_present) {
		if (r8 & PMIC_BATT_CHR_SUSBOVP_MASK) {
			pbi->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
			pmic_battery_log_event(BATT_EVENT_USBOVP_EXCPT);
		} else {
			pbi->usb_health = POWER_SUPPLY_HEALTH_GOOD;
		}
	}

	chrg_val = ccval & PMIC_BATT_ADC_ACCCHRGVAL_MASK;

	/* set batt_prev_charge_full to battery capacity the first time */
	if (!pbi->is_dev_info_updated) {
		if (pmic_scu_ipc_battery_property_get(&batt_prop)) {
			dev_warn(pbi->dev, "%s(): ipc config cmd failed\n",
								__func__);
			return;
		}
		pbi->batt_prev_charge_full = batt_prop.capacity;
	}

	/* set batt_status */
	if (batt_present && !batt_exception) {
		if (r8 & PMIC_BATT_CHR_SCOMP_MASK) {
			pbi->batt_status = POWER_SUPPLY_STATUS_FULL;
			pbi->batt_prev_charge_full = chrg_val;
		} else if (ccval & PMIC_BATT_ADC_ACCCHRG_MASK) {
			pbi->batt_status = POWER_SUPPLY_STATUS_DISCHARGING;
		} else {
			pbi->batt_status = POWER_SUPPLY_STATUS_CHARGING;
		}
	}

	/* set batt_charge_rate */
	if (pbi->is_dev_info_updated && batt_present && !batt_exception) {
		if (pbi->batt_status == POWER_SUPPLY_STATUS_DISCHARGING) {
			if (pbi->batt_charge_now - chrg_val) {
				pbi->batt_charge_rate = ((pbi->batt_charge_now -
					chrg_val) * 1000 * 60) /
					update_time_intrvl;
			}
		} else if (pbi->batt_status == POWER_SUPPLY_STATUS_CHARGING) {
			if (chrg_val - pbi->batt_charge_now) {
				pbi->batt_charge_rate = ((chrg_val -
					pbi->batt_charge_now) * 1000 * 60) /
					update_time_intrvl;
			}
		} else
			pbi->batt_charge_rate = 0;
	} else {
		pbi->batt_charge_rate = -1;
	}

	/* set batt_charge_now */
	if (batt_present && !batt_exception)
		pbi->batt_charge_now = chrg_val;
	else
		pbi->batt_charge_now = -1;

	pbi->is_dev_info_updated = PMIC_BATT_DRV_INFO_UPDATED;
}
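A minimal sketch (not part of the driver above) of the jiffies-based throttle used at the top of pmic_battery_read_status(): skip the refresh when less than a fixed interval has elapsed, and report the elapsed time in milliseconds. The names last_update, poll_interval_ms and battery_poll_due() are hypothetical.

static unsigned long last_update;			/* jiffies of the last refresh */
static const unsigned int poll_interval_ms = 500;	/* hypothetical interval */

static bool battery_poll_due(void)
{
	/* skip the refresh if less than poll_interval_ms has elapsed */
	if (last_update &&
	    time_before(jiffies, last_update + msecs_to_jiffies(poll_interval_ms)))
		return false;

	if (last_update)
		pr_debug("elapsed %u ms since last refresh\n",
			 jiffies_to_msecs(jiffies - last_update));

	last_update = jiffies;
	return true;
}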
Example #2
/*
 *  ANI performs periodic noise floor calibration
 *  that is used to adjust and optimize the chip performance.  This
 *  takes environmental changes (location, temperature) into account.
 *  When the task is complete, it reschedules itself depending on the
 *  appropriate interval that was calculated.
 */
void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval, long_cal_interval;
	unsigned long flags;

	if (ah->caldata && ah->caldata->nfcal_interference)
		long_cal_interval = ATH_LONG_CALINTERVAL_INT;
	else
		long_cal_interval = ATH_LONG_CALINTERVAL;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	ath9k_ps_wakeup(sc);

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
		longcal = true;
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
			shortcal = true;
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if (sc->sc_ah->config.enable_ani
	    && (timestamp - common->ani.checkani_timer) >=
	    ah->config.ani_poll_interval) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Call ANI routine if necessary */
	if (aniflag) {
		spin_lock_irqsave(&common->cc_lock, flags);
		ath9k_hw_ani_monitor(ah, ah->curchan);
		ath_update_survey_stats(sc);
		spin_unlock_irqrestore(&common->cc_lock, flags);
	}

	/* Perform calibration if necessary */
	if (longcal || shortcal) {
		common->ani.caldone =
			ath9k_hw_calibrate(ah, ah->curchan,
					   ah->rxchainmask, longcal);
	}

	ath_dbg(common, ANI,
		"Calibration @%lu finished: %s %s %s, caldone: %s\n",
		jiffies,
		longcal ? "long" : "", shortcal ? "short" : "",
		aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");

	ath9k_debug_samp_bb_mac(sc);
	ath9k_ps_restore(sc);

set_timer:
	/*
	* Set timer interval based on previous results.
	* The interval must be the shortest necessary to satisfy ANI,
	* short calibration and long calibration.
	*/
	cal_interval = ATH_LONG_CALINTERVAL;
	if (sc->sc_ah->config.enable_ani)
		cal_interval = min(cal_interval,
				   (u32)ah->config.ani_poll_interval);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
		if (!ah->caldata->paprd_done)
			ieee80211_queue_work(sc->hw, &sc->paprd_work);
		else if (!ah->paprd_table_write_done)
			ath_paprd_activate(sc);
	}
}
Example #3
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
{
	struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
	struct adapter *padapter = dvobj->if1;
	struct net_device *pnetdev = padapter->pnetdev;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
	unsigned long start_time = jiffies;

	pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid);

	if ((!padapter->bup) || (padapter->bDriverStopped) ||
	    (padapter->bSurpriseRemoved)) {
		pr_debug("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
			padapter->bup, padapter->bDriverStopped,
			padapter->bSurpriseRemoved);
		goto exit;
	}

	pwrpriv->bInSuspend = true;
	rtw_cancel_all_timer(padapter);
	LeaveAllPowerSaveMode(padapter);

	mutex_lock(&pwrpriv->mutex_lock);
	/* s1. */
	if (pnetdev) {
		netif_carrier_off(pnetdev);
		netif_tx_stop_all_queues(pnetdev);
	}

	/* s2. */
	rtw_disassoc_cmd(padapter, 0, false);

	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
	    check_fwstate(pmlmepriv, _FW_LINKED)) {
		pr_debug("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
			__func__, __LINE__,
			pmlmepriv->cur_network.network.Ssid.Ssid,
			pmlmepriv->cur_network.network.MacAddress,
			pmlmepriv->cur_network.network.Ssid.SsidLength,
			pmlmepriv->assoc_ssid.SsidLength);

		pmlmepriv->to_roaming = 1;
	}
	/* s2-2.  indicate disconnect to os */
	rtw_indicate_disconnect(padapter);
	/* s2-3. */
	rtw_free_assoc_resources(padapter);
	/* s2-4. */
	rtw_free_network_queue(padapter, true);

	rtw_dev_unload(padapter);
	mutex_unlock(&pwrpriv->mutex_lock);

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
		rtw_indicate_scan_done(padapter, 1);

	if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
		rtw_indicate_disconnect(padapter);

exit:
	pr_debug("<===  %s .............. in %dms\n", __func__,
		 jiffies_to_msecs(jiffies - start_time));

	return 0;
}
Example #4
static void curcial_oj_work_func(struct work_struct *work)
{
	struct curcial_oj_platform_data *oj = container_of(work, struct curcial_oj_platform_data, work);
	OJData_T  OJData;
	uint16_t i, j;
	uint8_t data[BURST_DATA_SIZE];
	uint32_t click_time = 0;
	uint32_t delta_time = 0;
	uint32_t entry_time = 0;
	OJKeyEvt_T	evtKey = OJ_KEY_NONE;
	uint8_t	x_count = 0;
	uint8_t	y_count = 0;
	bool out = false;
	uint8_t pxsum;
	uint16_t sht;
	int16_t	x_sum, x_idx;
	int16_t	y_sum, y_idx;



	mDeltaX = 0;
	mDeltaY = 0;
	oj->interval = interval;
	entry_time = jiffies_to_msecs(jiffies);
	x_sum = 0;
	y_sum = 0;

		do {
			memset(data, 0x00, sizeof(data));
			out = false;
			curcial_oj_burst_read(data);

			if (data[MOTION] != 0x60 && data[MOTION] != 0xe0) {
				pr_info("%s: OJ re-init enter\n", __func__);
				recoveryesd = 1;
				curcial_oj_init();
				break;
			}
			OJData.squal = data[SQUAL];
			pxsum = curcial_oj_register_read(0x09);
			sht =	((data[SHUTTER_UPPER] << 8)|data[SHUTTER_LOWER]);
			if (debugflag) {
				printk(KERN_INFO"OJ1:M=0x%02x Y=0x%02x X=0x%02x SQUAL=0x%02x "
				"SHU_U=0x%02x SHU_L=0x%02x pxsum=%d sht=%d  \n", data[MOTION], data[Y], data[X],
				 data[SQUAL], data[SHUTTER_UPPER], data[SHUTTER_LOWER], pxsum, sht);
			}
			if (ap_code) {
				for (i = 1; i < oj->degree; i++) {
					if (((oj->sht_tbl[i-1] < sht) && (sht <= oj->sht_tbl[i])) && (oj->pxsum_tbl[i] < pxsum)) {
						if (debugflag)
							printk("OJ:A.code_condition:%d\n", i);
						out = true;
						break;
					}
				}
			if (!out)
				goto exit;
			}
			oj->oj_adjust_xy(&data[MOTION], &mDeltaX, &mDeltaY);


				DeltaX[index] = (int8_t)mDeltaX;
				DeltaY[index] = (int8_t)mDeltaY;
			/*	printk(KERN_INFO"index=%d: DeltaX[]  = %d DeltaY[] = %d \n",index, DeltaX[index] , DeltaY[index]);*/
				if (++index == 64)
					index = 0;

			x_sum = x_sum + mDeltaX;
			y_sum = y_sum + mDeltaY;
			mSumDeltaX = mSumDeltaX + mDeltaX;
			mSumDeltaY = mSumDeltaY + mDeltaY;
			if (debugflag)
				printk(KERN_INFO"check:OJ:mSumDeltaX = %d mSumDeltaY = %d \n", mSumDeltaX, mSumDeltaY);

			evtKey = OJ_ProcessNavi(xy_ratio, normal_th, x_sum, y_sum);

			if (evtKey != OJ_KEY_NONE) {
				click_time = jiffies_to_msecs(jiffies);
				if (debugflag)
					printk(KERN_INFO"click_time=%x last_click_time=%x, %x\n", click_time, oj->last_click_time, click_time-oj->last_click_time);

			if (oj->last_click_time == 0) {
				oj->last_click_time = entry_time - oj->interval;
				oj->key = evtKey;
				}

			delta_time = 	click_time - entry_time;

			/*printk(KERN_INFO"x_sum=%d y_sum=%d, delta time=%dms\n", x_sum, y_sum, delta_time);*/

				if (click_time - oj->last_click_time < oj->interval) {
					evtKey = OJ_KEY_NONE;

				if (debugflag)
						printk(KERN_INFO"interval blocking < %d\n", oj->interval);
				} else if (click_time - oj->last_click_time < 80 && evtKey != oj->key) {
					evtKey = OJ_KEY_NONE;
					printk(KERN_INFO"sudden key ignore \n");
				}
			}

			x_idx = abs(x_sum) / normal_th;
			y_idx = abs(y_sum) / normal_th;
			if (x_idx >= ARRAY_SIZE(oj->Xsteps))
				x_idx = ARRAY_SIZE(oj->Xsteps) - 1;
			if (y_idx >= ARRAY_SIZE(oj->Ysteps))
				y_idx = ARRAY_SIZE(oj->Ysteps) - 1;

			x_count = oj->Xsteps[x_idx];
			y_count = oj->Ysteps[y_idx];
			if (evtKey == OJ_KEY_LEFT) {
				for (j = 0; j < x_count; j++) {
					input_report_rel(oj->input_dev, REL_X, -1);
					input_sync(oj->input_dev);
				}
				if (debugflag)
					printk(KERN_INFO"OJ:KEY_LEFT:%d\n", x_count);

			} else if (evtKey == OJ_KEY_RIGHT) {
				for (j = 0; j < x_count; j++) {
					input_report_rel(oj->input_dev, REL_X, 1);
					input_sync(oj->input_dev);
				}
				if (debugflag)
					printk(KERN_INFO"OJ:KEY_RIGHT:%d\n", x_count);

			} else if (evtKey == OJ_KEY_DOWN) {
				for (j = 0; j < y_count; j++) {
					input_report_rel(oj->input_dev, REL_Y, 1);
					input_sync(oj->input_dev);
				}
				if (debugflag)
					printk(KERN_INFO"OJ:KEY_DOWN:%d\n", y_count);

			} else if (evtKey == OJ_KEY_UP) {
				for (j = 0; j < y_count; j++) {
					input_report_rel(oj->input_dev, REL_Y, -1);
					input_sync(oj->input_dev);
				}
				if (debugflag)
					printk(KERN_INFO"OJ:KEY_UP:%d\n", y_count);
			}

			if (evtKey != OJ_KEY_NONE) {
				oj->key = evtKey;
				oj->last_click_time = click_time;
				x_sum = 0;
				y_sum = 0;
				/*goto exit;*/
			}
		mDeltaX = 0;
		mDeltaY = 0;
		if (polling_delay)
			hr_msleep(polling_delay);
			} while ((data[0] & 0x80) && (!atomic_read(&suspend_flag)));


exit:

	if (debugflag)
		printk(KERN_INFO"%s:-\n", __func__);

	enable_irq(oj->irq);
}
Example #5
/*
 * power on the MIPI module and init the parameters (such as color format)
 * that will be used by vdin
 */
static int amcsi_feopen(struct tvin_frontend_s *fe, enum tvin_port_e port)
{
        struct amcsi_dev_s *csi_devp = container_of(fe, amcsi_dev_t, frontend);
        struct vdin_parm_s *parm = fe->private_data;
        csi_parm_t *p = &csi_devp->csi_parm;
        int ret;

        if((port != TVIN_PORT_MIPI)){
                DPRINT("[mipi..]%s:invaild port %d.\n",__func__, port);
                return -1;
        }
        /*copy the param from vdin to csi*/
        if(!memcpy(&csi_devp->para, parm, sizeof(vdin_parm_t))){
                DPRINT("[mipi..]%s memcpy error.\n",__func__);
                return -1;
        }

        init_am_mipi_csi2_clock();// init mipi csi measure clock
        csi_devp->para.port = port;

        memcpy( &csi_devp->csi_parm, &parm->csi_hw_info, sizeof( csi_parm_t));
        csi_devp->csi_parm.skip_frames = parm->skip_count;

        csi_devp->reset = 0;
        csi_devp->reset_count = 0;

#if 0
        csi_devp->irq_num = INT_MIPI_PHY; //INT_CSI2_HOST;
        ret = request_irq(csi_devp->irq_num, csi_hst_isr, IRQF_SHARED, "csi-hst1"/*devp->irq_name*/, csi_devp);
        //SET_CSI_HST_REG_MASK(MIPI_CSI2_HOST_MASK1, ~((1<< p->lanes) - 1));
        //WRITE_CSI_HST_REG_BITS(MIPI_CSI2_HOST_MASK1, 0, 28, 1); // enable err_ecc_double

        //SET_CSI_HST_REG_MASK(MIPI_CSI2_HOST_MASK1, ~((1<< p->lanes) - 1));
        DPRINT("INT_CSI2_HOST = %d, INT_CSI2_HOST_2=%d\n", INT_CSI2_HOST, INT_CSI2_HOST_2)
        DPRINT("mask1=%x\n", ~((1<< p->lanes) - 1));
#if 0
        csi_devp->irq_num = INT_CSI2_HOST_2;
        ret = request_irq(csi_devp->irq_num, csi_hst_isr, IRQF_SHARED, "csi-hst2"/*devp->irq_name*/, csi_devp);
#endif
        if( ret < 0 ){
                printk("failed to request csi_adapter irq \n");
        }
#endif

        init_timer (&csi_devp->t);
        csi_devp->t.data = csi_devp;
        csi_devp->t.function = csi2_timer_func;
        csi_devp->t.expires = jiffies + WDG_STEP_JIFFIES; //reset after 50ms=5jiffies
        if(0 == csi_devp->min_frmrate){
                csi_devp->min_frmrate = 1;
        }
        csi_devp->period = 1000 / parm->frame_rate;
        //printk("period=%d, jiffies=%d\n", csi_devp->period, msecs_to_jiffies(csi_devp->period));
        if(csi_devp->period <= jiffies_to_msecs(WDG_STEP_JIFFIES))
        {
                csi_devp->period = 0;
        }else{
                csi_devp->period -= jiffies_to_msecs(WDG_STEP_JIFFIES);
        }

        add_timer(&csi_devp->t);
        cal_csi_para(&csi_devp->csi_parm);
        am_mipi_csi2_init(&csi_devp->csi_parm);
        return 0;
        //csi_devp->skip_vdin_frame_count = parm->reserved;
}
Example #6
/*
 * Setup, register & probe an IDE channel driven by this driver, this is
 * called by one of the 2 probe functions (macio or PCI).
 */
static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
{
	struct device_node *np = pmif->node;
	const int *bidp;
	struct ide_host *host;
	ide_hwif_t *hwif;
	struct ide_hw *hws[] = { hw };
	struct ide_port_info d = pmac_port_info;
	int rc;

	pmif->broken_dma = pmif->broken_dma_warn = 0;
	if (of_device_is_compatible(np, "shasta-ata")) {
		pmif->kind = controller_sh_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA6;
	} else if (of_device_is_compatible(np, "kauai-ata")) {
		pmif->kind = controller_un_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "K2-UATA")) {
		pmif->kind = controller_k2_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "keylargo-ata")) {
		if (strcmp(np->name, "ata-4") == 0) {
			pmif->kind = controller_kl_ata4;
			d.port_ops = &pmac_ide_ata4_port_ops;
			d.udma_mask = ATA_UDMA4;
		} else
			pmif->kind = controller_kl_ata3;
	} else if (of_device_is_compatible(np, "heathrow-ata")) {
		pmif->kind = controller_heathrow;
	} else {
		pmif->kind = controller_ohare;
		pmif->broken_dma = 1;
	}

	bidp = of_get_property(np, "AAPL,bus-id", NULL);
	pmif->aapl_bus_id =  bidp ? *bidp : 0;

	/* On Kauai-type controllers, we make sure the FCR is correct */
	if (pmif->kauai_fcr)
		writel(KAUAI_FCR_UATA_MAGIC |
		       KAUAI_FCR_UATA_RESET_N |
		       KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);
	
	/* Make sure we have sane timings */
	sanitize_timings(pmif);

	/* If we are on a media bay, wait for it to settle and lock it */
	if (pmif->mdev)
		lock_media_bay(pmif->mdev->media_bay);

	host = ide_host_alloc(&d, hws, 1);
	if (host == NULL) {
		rc = -ENOMEM;
		goto bail;
	}
	hwif = pmif->hwif = host->ports[0];

	if (on_media_bay(pmif)) {
		/* Fixup bus ID for media bay */
		if (!bidp)
			pmif->aapl_bus_id = 1;
	} else if (pmif->kind == controller_ohare) {
		/* The code below is having trouble on some ohare machines
		 * (timing related ?). Until I can put my hand on one of these
		 * units, I keep the old way
		 */
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
	} else {
 		/* This is necessary to enable IDE when net-booting */
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
		msleep(10);
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
		msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
	}

	printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
	       "bus ID %d%s, irq %d\n", model_name[pmif->kind],
	       pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
	       on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);

	rc = ide_host_register(host, &d, hws);
	if (rc)
		pmif->hwif = NULL;

	if (pmif->mdev)
		unlock_media_bay(pmif->mdev->media_bay);

 bail:
	if (rc && host)
		ide_host_free(host);
	return rc;
}
static int iwmct_probe(struct sdio_func *func,
			   const struct sdio_device_id *id)
{
	struct iwmct_priv *priv;
	int ret;
	int val = 1;
	int addr = IWMC_SDIO_INTR_ENABLE_ADDR;

	dev_dbg(&func->dev, "enter iwmct_probe\n");

	dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
		jiffies_to_msecs(2147483647), HZ);

	priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&func->dev, "kzalloc error\n");
		return -ENOMEM;
	}
	priv->func = func;
	sdio_set_drvdata(func, priv);


	/* create drivers work queue */
	priv->wq = create_workqueue(DRV_NAME "_wq");
	priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
	INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
	INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);

	init_waitqueue_head(&priv->wait_q);

	sdio_claim_host(func);
	/* FIXME: Remove after it is fixed in the Boot ROM upgrade */
	func->enable_timeout = 10;

	/* In our HW, setting the block size also wakes up the boot rom. */
	ret = sdio_set_block_size(func, priv->dbg.block_size);
	if (ret) {
		LOG_ERROR(priv, INIT,
			"sdio_set_block_size() failure: %d\n", ret);
		goto error_sdio_enable;
	}

	ret = sdio_enable_func(func);
	if (ret) {
		LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
		goto error_sdio_enable;
	}

	/* init reset and dev_sync states */
	atomic_set(&priv->reset, 0);
	atomic_set(&priv->dev_sync, 0);

	/* init read req queue */
	INIT_LIST_HEAD(&priv->read_req_list);

	/* process configurable parameters */
	iwmct_dbg_init_params(priv);
	ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
	if (ret) {
		LOG_ERROR(priv, INIT, "Failed to register attributes and "
			 "initialize module_params\n");
		goto error_dev_attrs;
	}

	iwmct_dbgfs_register(priv, DRV_NAME);

	if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
		LOG_INFO(priv, INIT,
			 "Reducing transaction to 8 blocks = 2K (from %d)\n",
			 priv->dbg.download_trans_blks);
		priv->dbg.download_trans_blks = 8;
	}
	priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
	LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);

	ret = sdio_claim_irq(func, iwmct_irq);
	if (ret) {
		LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
		goto error_claim_irq;
	}


	/* Enable function's interrupt */
	sdio_writeb(priv->func, val, addr, &ret);
	if (ret) {
		LOG_ERROR(priv, INIT, "Failure writing to "
			  "Interrupt Enable Register (%d): %d\n", addr, ret);
		goto error_enable_int;
	}

	sdio_release_host(func);

	LOG_INFO(priv, INIT, "exit iwmct_probe\n");

	return ret;

error_enable_int:
	sdio_release_irq(func);
error_claim_irq:
	sdio_disable_func(func);
error_dev_attrs:
	iwmct_dbgfs_unregister(priv->dbgfs);
	sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
error_sdio_enable:
	sdio_release_host(func);
	return ret;
}
Example #8
static INT32 wmt_dev_tm_temp_query(void)
{
    #define HISTORY_NUM       5
    #define TEMP_THRESHOLD   65
    #define REFRESH_TIME    300 //sec
    
    static INT32 temp_table[HISTORY_NUM] = {99}; //not query yet.
    static INT32 idx_temp_table = 0;
    static struct timeval query_time, now_time;

    INT8  query_cond = 0;
    INT32 current_temp = 0;
    INT32 index = 0;

    //Query condition 1:
    // If we have the high temperature records on the past, we continue to query/monitor 
    // the real temperature until cooling
    for(index = 0; index < HISTORY_NUM ; index++)
    {
       if(temp_table[index] >= TEMP_THRESHOLD)
       {
            query_cond = 1;
            WMT_INFO_FUNC("high temperature (current temp = %d), we must keep querying temp temperature..\n", temp_table[index]);
       }            
    }

    do_gettimeofday(&now_time);

    // Query condition 2:
    // Moniter the hif_sdio activity to decide if we have the need to query temperature.
    if(!query_cond)
    {
        if( wmt_dev_tra_sdio_poll()==0)
        {
            query_cond = 1;
            WMT_INFO_FUNC("sdio traffic , we must query temperature..\n");
        }
        else
        {
            WMT_DBG_FUNC("sdio idle traffic ....\n");
        }

        //only WIFI tx power might make temperature varies largely
        #if 0
        if(!query_cond)
        {
            last_access_time = wmt_dev_tra_uart_poll();
            if( jiffies_to_msecs(last_access_time) < TIME_THRESHOLD_TO_TEMP_QUERY)
            {
                query_cond = 1;
                WMT_DBG_FUNC("uart busy traffic , we must query temperature..\n");
            }
            else
            {
                WMT_DBG_FUNC("uart still idle traffic , we don't query temp temperature..\n");
            }
        }
        #endif
    }
    
    // Query condition 3:
    // If the query time exceeds the a certain of period, refresh temp table.
    //
    if(!query_cond)
    {
        if( (now_time.tv_sec < query_time.tv_sec) || //time overflow, we refresh temp table again for simplicity!
            ((now_time.tv_sec > query_time.tv_sec) && 
            (now_time.tv_sec - query_time.tv_sec) > REFRESH_TIME))
        {               
            query_cond = 1;

            WMT_INFO_FUNC("It is long time (> %d sec) not to query, we must query temp temperature..\n", REFRESH_TIME);
            for (index = 0; index < HISTORY_NUM ; index++)
            {
                temp_table[index] = 99;                
            }
        }
    }
        
    if(query_cond)
    {
        // update the temperature record
        mtk_wcn_wmt_therm_ctrl(WMTTHERM_ENABLE);
        current_temp = mtk_wcn_wmt_therm_ctrl(WMTTHERM_READ);
        mtk_wcn_wmt_therm_ctrl(WMTTHERM_DISABLE);
        wmt_lib_notify_stp_sleep();
        idx_temp_table = (idx_temp_table + 1) % HISTORY_NUM;
        temp_table[idx_temp_table] = current_temp;
        do_gettimeofday(&query_time);

        WMT_INFO_FUNC("[Thermal] current_temp = 0x%x \n", (current_temp & 0xFF));
    }
    else
    {
        current_temp = temp_table[idx_temp_table];
        idx_temp_table = (idx_temp_table + 1) % HISTORY_NUM;
        temp_table[idx_temp_table] = current_temp;             
    }

    //
    // Dump information
    //    
    WMT_DBG_FUNC("[Thermal] idx_temp_table = %d \n", idx_temp_table);
    WMT_DBG_FUNC("[Thermal] now.time = %d, query.time = %d, REFRESH_TIME = %d\n", now_time.tv_sec, query_time.tv_sec, REFRESH_TIME);

    WMT_DBG_FUNC("[0] = %d, [1] = %d, [2] = %d, [3] = %d, [4] = %d \n----\n", 
        temp_table[0], temp_table[1], temp_table[2], temp_table[3], temp_table[4]);
    
    return current_temp;
}
Example #9
static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	 /* A synchronous command can not have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}
Example #10
/*  the input parameter start must be in jiffies */
inline s32 rtw_get_passing_time_ms(u32 start)
{
	return jiffies_to_msecs(jiffies-start);
}
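A minimal usage sketch for the helper above: record the start jiffies, do the work, then read back the elapsed milliseconds. do_some_work() and timing_example() are hypothetical names used only for illustration.

static void timing_example(void)
{
	u32 start = jiffies;

	do_some_work();		/* hypothetical operation being timed */
	pr_debug("do_some_work took %d ms\n", rtw_get_passing_time_ms(start));
}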
Example #11
/*--------------------------------------------------------------------------
  
  \brief vos_timer_get_system_ticks() - Get the system time in 10ms ticks

  The \a vos_timer_get_system_ticks() function returns the current number
  of timer ticks in 10msec intervals.  This function is suitable for
  timestamping and for calculating time intervals as the difference between
  two timestamps.
    
  \returns - The current system tick count (in 10msec intervals).  This 
             function cannot fail.
  
  \sa
  
  ------------------------------------------------------------------------*/
v_TIME_t vos_timer_get_system_ticks( v_VOID_t )
{
   return( jiffies_to_msecs(jiffies) / 10 );
}
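A minimal usage sketch for the helper above: take two tick samples and form the difference, which is the elapsed time in 10 msec units. do_scan() and tick_interval_example() are hypothetical names used only for illustration.

static void tick_interval_example(void)
{
	v_TIME_t t0 = vos_timer_get_system_ticks();
	v_TIME_t elapsed;

	do_scan();		/* hypothetical operation being timed */
	elapsed = vos_timer_get_system_ticks() - t0;
	pr_debug("scan took %lu ticks (~%lu ms)\n",
		 (unsigned long)elapsed, (unsigned long)elapsed * 10);
}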
extern fm_s32 fm_print_evt_fifo(void)
{
#ifdef FM_TRACE_ENABLE
    struct fm_trace_t trace;
    fm_s32 i = 0;

    while (fm_false == FM_TRACE_EMPTY(evt_fifo)) {
        fm_memset(trace.pkt, 0, FM_TRACE_PKT_SIZE);
        FM_TRACE_OUT(evt_fifo, &trace);
        WCN_DBG(FM_ALT | LINK, "%s: op %d, len %d, %d\n", evt_fifo->name, trace.opcode, trace.len, jiffies_to_msecs(abs(trace.time)));
        i = 0;
        while ((trace.len > 0) && (i < trace.len) && (i < (FM_TRACE_PKT_SIZE-8))) {
            WCN_DBG(FM_ALT | LINK, "%s: %02x %02x %02x %02x %02x %02x %02x %02x\n", \
                    evt_fifo->name, trace.pkt[i], trace.pkt[i+1], trace.pkt[i+2], trace.pkt[i+3], trace.pkt[i+4], trace.pkt[i+5], trace.pkt[i+6], trace.pkt[i+7]);
            i += 8;
        }
        WCN_DBG(FM_ALT | LINK, "%s\n", evt_fifo->name);
    }
#endif

    return 0;
}
/*
 * ------------------------------------------------------------
 * rs_close()
 * 
 * This routine is called when the serial port gets closed.  First, we
 * wait for the last remaining data to be sent.  Then, we unlink its
 * async structure from the interrupt chain if necessary, and we free
 * that IRQ if nothing is left in the chain.
 * ------------------------------------------------------------
 */
static void rs_close(struct tty_struct *tty, struct file * filp)
{
	struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
	struct tty_port *port = &info->tport;
	m68328_uart *uart = &uart_addr[info->line];
	unsigned long flags;

	if (serial_paranoia_check(info, tty->name, "rs_close"))
		return;
	
	local_irq_save(flags);
	
	if (tty_hung_up_p(filp)) {
		local_irq_restore(flags);
		return;
	}
	
	if ((tty->count == 1) && (port->count != 1)) {
		/*
		 * Uh, oh.  tty->count is 1, which means that the tty
		 * structure will be freed.  Info->count should always
		 * be one in these conditions.  If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		printk("rs_close: bad serial port count; tty->count is 1, "
		       "port->count is %d\n", port->count);
		port->count = 1;
	}
	if (--port->count < 0) {
		printk("rs_close: bad serial port count for ttyS%d: %d\n",
		       info->line, port->count);
		port->count = 0;
	}
	if (port->count) {
		local_irq_restore(flags);
		return;
	}
	port->flags |= ASYNC_CLOSING;
	/*
	 * Now we wait for the transmit buffer to clear; and we notify 
	 * the line discipline to only process XON/XOFF characters.
	 */
	tty->closing = 1;
	if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
		tty_wait_until_sent(tty, port->closing_wait);
	/*
	 * At this point we stop accepting input.  To do this, we
	 * disable the receive line status interrupts, and tell the
	 * interrupt driver to stop checking the data ready bit in the
	 * line status register.
	 */

	uart->ustcnt &= ~USTCNT_RXEN;
	uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);

	shutdown(info, tty);
	rs_flush_buffer(tty);
		
	tty_ldisc_flush(tty);
	tty->closing = 0;
	tty_port_tty_set(&info->tport, NULL);
#warning "This is not and has never been valid so fix it"	
#if 0
	if (tty->ldisc.num != ldiscs[N_TTY].num) {
		if (tty->ldisc.close)
			(tty->ldisc.close)(tty);
		tty->ldisc = ldiscs[N_TTY];
		tty->termios.c_line = N_TTY;
		if (tty->ldisc.open)
			(tty->ldisc.open)(tty);
	}
#endif	
	if (port->blocked_open) {
		if (port->close_delay)
			msleep_interruptible(jiffies_to_msecs(port->close_delay));
		wake_up_interruptible(&port->open_wait);
	}
	port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
	wake_up_interruptible(&port->close_wait);
	local_irq_restore(flags);
}
Example #14
/*
 * This thread processes interrupts reported by the Primary Interrupt Handler.
 */
static int twl6030_irq_thread(void *data)
{
	long irq = (long)data;
	static unsigned i2c_errors;
	static const unsigned max_i2c_errors = 100;
	int ret;

	current->flags |= PF_NOFREEZE;

	while (!kthread_should_stop()) {
		int i;
		int start_time = 0;
		union {
		u8 bytes[4];
		u32 int_sts;
		} sts;

		/* Wait for IRQ, then read PIH irq status (also blocking) */
		wait_for_completion_interruptible(&irq_event);

		/* read INT_STS_A, B and C in one shot using a burst read */
		ret = twl_i2c_read(TWL_MODULE_PIH, sts.bytes,
				REG_INT_STS_A, 3);
		if (ret) {
			pr_warning("twl6030: I2C error %d reading PIH ISR\n",
					ret);
			if (++i2c_errors >= max_i2c_errors) {
				printk(KERN_ERR "Maximum I2C error count"
						" exceeded.  Terminating %s.\n",
						__func__);
				break;
			}
			complete(&irq_event);
			continue;
		}



		sts.bytes[3] = 0; /* Only 24 bits are valid*/

		/*
		 * Since VBUS status bit is not reliable for VBUS disconnect
		 * use CHARGER VBUS detection status bit instead.
		 */
		if (sts.bytes[2] & 0x10)
			sts.bytes[2] |= 0x08;

		for (i = 0; sts.int_sts; sts.int_sts >>= 1, i++) {
			local_irq_disable();
			if (sts.int_sts & 0x1) {
				int module_irq = twl6030_irq_base +
					twl6030_interrupt_mapping[i];
				struct irq_desc *d = irq_to_desc(module_irq);

				if (!d) {
					pr_err("twl6030: Invalid SIH IRQ: %d\n",
					       module_irq);
					return -EINVAL;
				}

				/* this may be a wakeup event
				 * d->status flag's are masked while we are
				 * waking up, give some time for the
				 * IRQ to be enabled.
				 */
				start_time = jiffies;
				while ((d->status & IRQ_DISABLED) &&
				       (jiffies_to_msecs(jiffies-start_time) < 100)) {
					yield();
				}

				/* These can't be masked ... always warn
				 * if we get any surprises.
				 */
				if (d->status & IRQ_DISABLED) {
					pr_warning("twl handler not called, irq is disabled!\n");
					note_interrupt(module_irq, d,
							IRQ_NONE);
				}
				else
					d->handle_irq(module_irq, d);

			}
		local_irq_enable();
		}
		ret = twl_i2c_write(TWL_MODULE_PIH, sts.bytes,
				REG_INT_STS_A, 3); /* clear INT_STS_A */
		if (ret)
			pr_warning("twl6030: I2C error in clearing PIH ISR\n");

		enable_irq(irq);
	}

	return 0;
}
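A minimal sketch of the bounded-wait idiom used above while giving the IRQ time to be re-enabled: poll a condition, but give up once jiffies_to_msecs() of the elapsed jiffies reaches the timeout. wait_condition_ms() is a hypothetical helper, not part of the twl6030 driver.

static bool wait_condition_ms(bool (*cond)(void), unsigned int timeout_ms)
{
	unsigned long start = jiffies;

	while (!cond()) {
		if (jiffies_to_msecs(jiffies - start) >= timeout_ms)
			return false;	/* gave up after timeout_ms */
		yield();
	}
	return true;
}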
Example #15
/*
 * Issue a new request to a device.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t	*drive = q->queuedata;
	ide_hwif_t	*hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = NULL;
	ide_startstop_t	startstop;
	unsigned long queue_run_ms = 3; /* old plug delay */

	spin_unlock_irq(q->queue_lock);

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			unsigned long left = drive->sleep - jiffies;

			queue_run_ms = jiffies_to_msecs(left + 1);
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		if (!rq)
			rq = blk_fetch_request(drive->queue);

		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 * 
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			goto repeat;
		}
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	if (rq) {
		blk_requeue_request(q, rq);
		blk_delay_queue(q, queue_run_ms);
	}
}
Example #16
static void drct_rmw_write_complete_handler(spd_dev_t *dev)
{
  int retval = 0;
  unsigned long flags = 0;
  u32 sector;
  int wsize;
  PTRACE();

  PINFO("<spd%c>complete time=%dms errcode=%d",
	dev->id+'a', jiffies_to_msecs(dev->ticks), dev->errcode);

  if(dev->cache){
    spd_cache_clr_dirty(dev->cache);
  }

  if(unlikely(dev->errcode < 0)){
    retval = dev->errcode;
    goto ABORT;
  }

  if(dev->bdev->rmw_count == 0){
    spin_lock_irqsave(&dev->bdev->rq_lock, flags);
    bdev_end_request(dev, 0);
    spin_unlock_irqrestore(&dev->bdev->rq_lock, flags);

    dev->complete_handler = NULL;
    spd_io_unlock(dev);

    return;
  }

  sector = dev->bdev->rmw_sector;
  wsize  = spd_get_wsize(dev, sector);
  if(unlikely(wsize < 0)){
    PERROR("<spd%c>spd_get_wsize() failed sector=%08x", dev->id+'a', sector);
    retval = -EINVAL;
    goto ABORT;
  }
  sector = align_sector(sector, wsize, spd_get_sector_offset(dev, sector));

  if(sector == dev->bdev->rmw_sector && dev->bdev->rmw_count >= wsize){
    dev->cache->sector   = sector;
    dev->cache->n_sector = wsize;
    retval = drct_make_rmw_sg(dev);
    if(unlikely(retval < 0)){
      PERROR("<spd%c>drct_make_rmw_sg() failed(%d)", dev->id+'a', retval);
      goto ABORT;
    }

    dev->complete_handler = drct_rmw_write_complete_handler;
    retval = spd_write_sector(dev, sector, wsize, dev->sg);
    if(unlikely(retval < 0)){
      PERROR("<spd%c>spd_write_sector() failed(%d)", dev->id+'a', retval);
      goto ABORT;
    }

    dev->cache->sector = (u32)-1;
    spd_cache_clr_dirty(dev->cache);

    return;
  }

  dev->cache->sector   = sector;
  dev->cache->n_sector = wsize;

  dev->complete_handler = drct_rmw_read_complete_handler;
  spd_cache_prepare(dev, SPD_DIR_READ);
  retval = spd_read_sector(dev,
                           dev->cache->sector,
                           dev->cache->n_sector,
                           dev->cache->sg);
  if(unlikely(retval < 0)){
    PERROR("<spd%c>spd_read_sector() failed(%d)", dev->id+'a', retval);
    goto ABORT;
  }

  return;

ABORT:
  PINFO("<spd%c>ABORT at %s", dev->id+'a', __FUNCTION__);
  if(dev->cache){
    dev->cache->sector = (u32)-1;
    spd_cache_clr_dirty(dev->cache);
  }

  spin_lock_irqsave(&dev->bdev->rq_lock, flags);
  bdev_end_request(dev, retval);
  spin_unlock_irqrestore(&dev->bdev->rq_lock, flags);

  dev->complete_handler = NULL;
  spd_io_unlock(dev);
}
static irqreturn_t unicam_camera_isr(int irq, void *arg)
{
	struct unicam_camera_dev *unicam_dev = (struct unicam_camera_dev *)arg;
#if 1
	struct v4l2_subdev *sd = soc_camera_to_subdev(unicam_dev->icd);
#endif
	int ret;
	struct int_desc idesc;
	struct rx_stat_list rx;
	u32 isr_status, raw_stat;
	static unsigned int t1 = 0, t2 = 0, fps = 0;
	struct buffer_desc im0;
	dma_addr_t dma_addr;
	unsigned long flags;

	/* has the interrupt occured for Channel 0? */
	memset(&rx, 0x00, sizeof(struct rx_stat_list));
	raw_stat = mm_csi0_get_rx_stat(&rx, 1);
	if (atomic_read(&unicam_dev->streaming) == 0) {
		memset(&idesc, 0x00, sizeof(struct int_desc));
		isr_status = mm_csi0_get_int_stat(&idesc, 1);
		pr_err("ISR triggered after stop stat=0x%x istat=0x%x\n",
			raw_stat, isr_status);

		goto out;
	} else if (rx.is) {
		memset(&idesc, 0x00, sizeof(struct int_desc));
		isr_status = mm_csi0_get_int_stat(&idesc, 1);
		if (idesc.fsi) {
			if (rx.ps)
				pr_info("Panic at frame start\n");
		}

		if (idesc.fei || idesc.lci){
			struct vb2_buffer *vb = unicam_dev->active;
			/* FS and FE handling */
			if (rx.ps)
				pr_info("Panic at frame or lineend\n");
			fps++;
			if (t1 == 0 && t2 == 0)
				t1 = t2 = jiffies_to_msecs(jiffies);

			t2 = jiffies_to_msecs(jiffies);
			if (t2 - t1 > 1000) {
				pr_info(" sensor fps = %d panic count %d\n",
						fps, unicam_dev->panic_count);
				fps = 0;
				t1 = t2;
			}
			atomic_set(&unicam_dev->cam_triggered, 0);
			/*atomic_set(&unicam_dev->retry_count, 0);
			del_timer(&(unicam_dev->unicam_timer));*/
			pr_debug("frame received");
			if (!vb)
			{
				pr_err("%s: vb is not active\n",__func__);
				goto out;
			}

			if (unicam_dev->skip_frames <= 0) {
				struct v4l2_control ctrl;
				int ret = -1;
				ctrl.value = 0;
				ctrl.id = V4L2_CID_CAMERA_READ_MODE_CHANGE_REG;
				ret = v4l2_subdev_call(sd, core, g_ctrl, &ctrl);

				if ((ret >= 0) && (ctrl.value > 0)) {
					/* capture mode is not ready yet */
					unicam_dev->skip_frames = ctrl.value;
					pr_info("%s: sensor mode change in process ,need_skip_frame=%d\n",
					__func__, ctrl.value);
				}
			}
			if (likely(unicam_dev->skip_frames <= 0)) {
				spin_lock_irqsave(&unicam_dev->lock, flags);
				list_del_init(&to_unicam_camera_vb(vb)->queue);
				spin_unlock_irqrestore(&unicam_dev->lock,
								flags);
				do_gettimeofday(&vb->v4l2_buf.timestamp);
				vb->v4l2_planes[0].bytesused = 0;

				if (unicam_dev->icd->current_fmt->code ==
				    V4L2_MBUS_FMT_JPEG_1X8) {
				} else {
					ret = 1;
				}

				vb2_buffer_done(vb, VB2_BUF_STATE_DONE);

				spin_lock_irqsave(&unicam_dev->lock, flags);
				if (atomic_read(&unicam_dev->stopping) == 1) {
					up(&unicam_dev->stop_sem);
					unicam_dev->active = NULL;
				} else if (!list_empty(&unicam_dev->capture)) {
						unicam_dev->active =
						    &list_entry(unicam_dev->
							capture.next, struct
 							unicam_camera_buffer,
								queue)->vb;
				} else {
Example #18
/**
 * Compute the speed of specified hash function
 *
 * Run a speed test on the given hash algorithm on buffer using a 1MB buffer
 * size.  This is a reasonable buffer size for Lustre RPCs, even if the actual
 * RPC size is larger or smaller.
 *
 * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and
 * is available through the cfs_crypto_hash_speed() function.
 *
 * This function needs to stay the same as obd_t10_performance_test() so that
 * the speeds are comparable.
 *
 * \param[in] hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 * \param[in] buf	data buffer on which to compute the hash
 * \param[in] buf_len	length of \buf on which to compute hash
 */
static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
	int			buf_len = max(PAGE_SIZE, 1048576UL);
	void			*buf;
	unsigned long		start, end;
	int			err = 0;
	unsigned long		bcount;
	struct page		*page;
	unsigned char		hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	unsigned int		hash_len = sizeof(hash);

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		err = -ENOMEM;
		goto out_err;
	}

	buf = kmap(page);
	memset(buf, 0xAD, PAGE_SIZE);
	kunmap(page);

	for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC / 4),
	     bcount = 0; time_before(jiffies, end) && err == 0; bcount++) {
		struct ahash_request *req;
		int i;

		req = cfs_crypto_hash_init(hash_alg, NULL, 0);
		if (IS_ERR(req)) {
			err = PTR_ERR(req);
			break;
		}

		for (i = 0; i < buf_len / PAGE_SIZE; i++) {
			err = cfs_crypto_hash_update_page(req, page, 0,
							  PAGE_SIZE);
			if (err != 0)
				break;
		}

		err = cfs_crypto_hash_final(req, hash, &hash_len);
		if (err != 0)
			break;
	}
	end = jiffies;
	__free_page(page);
out_err:
	if (err != 0) {
		cfs_crypto_hash_speeds[hash_alg] = err;
		CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
		       cfs_crypto_hash_name(hash_alg), err);
	} else {
		unsigned long   tmp;

		tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
		       1000) / (1024 * 1024);
		cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
		CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
		       cfs_crypto_hash_name(hash_alg),
		       cfs_crypto_hash_speeds[hash_alg]);
	}
}
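A minimal sketch of the throughput arithmetic above: bcount passes over a buf_len-byte buffer, divided by the elapsed milliseconds, then scaled to MB/s. For example, 400 passes over a 1 MiB buffer in 250 ms gives roughly 1600 MB/s. hash_speed_mbs() is a hypothetical helper, not part of libcfs.

static unsigned int hash_speed_mbs(unsigned long bcount, unsigned int buf_len,
				   unsigned long elapsed_jiffies)
{
	unsigned long ms = jiffies_to_msecs(elapsed_jiffies);

	if (!ms)
		return 0;	/* avoid dividing by zero on very fast runs */

	return (bcount * buf_len / ms) * 1000 / (1024 * 1024);
}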
Example #19
void rtl8192_hw_to_sleep(struct net_device *dev, u32 th, u32 tl)
{
#ifdef _RTL8192_EXT_PATCH_
	struct r8192_priv *priv = rtllib_priv(dev);
	u32 rb = jiffies, sleep_cost = MSECS(8+16+7), delta = 0;
	unsigned long flags;

	if((tl > rb) && (th > 0))
		return;

	spin_lock_irqsave(&priv->ps_lock,flags);

	if (tl >= sleep_cost)
		tl -= sleep_cost;
	else if (th > 0) {
		tl = 0xffffffff - sleep_cost + tl;  
		th--;
	} else {
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		return;
	}

	if (tl > rb) {
		delta = tl - rb;
	} else if (th > 0) {
		delta = 0xffffffff - rb + tl;
		th --;
	} else {
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		return;
	}

	if (delta <= MSECS(MIN_SLEEP_TIME)) {
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		printk("too short to sleep::%x, %x, %lx\n",tl, rb,  MSECS(MIN_SLEEP_TIME));
		return;
	}	

	if(delta > MSECS(MAX_SLEEP_TIME)) {
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		printk("========>too long to sleep:%x, %x, %lx\n", tl, rb,  MSECS(MAX_SLEEP_TIME));
		return;
	}

	RT_TRACE(COMP_LPS, "==============>%s(): wake up time is %d,%d\n",__FUNCTION__,delta,jiffies_to_msecs(delta));
	queue_delayed_work_rsl(priv->rtllib->wq,&priv->rtllib->hw_wakeup_wq,delta); 
	queue_delayed_work_rsl(priv->rtllib->wq, (void *)&priv->rtllib->hw_sleep_wq,0);

	spin_unlock_irqrestore(&priv->ps_lock,flags);
#else
	struct r8192_priv *priv = rtllib_priv(dev);

	u32 rb = jiffies;
	unsigned long flags;

	spin_lock_irqsave(&priv->ps_lock,flags);

	tl -= MSECS(8+16+7);

	if(((tl>=rb)&& (tl-rb) <= MSECS(MIN_SLEEP_TIME))
			||((rb>tl)&& (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		printk("too short to sleep::%x, %x, %lx\n",tl, rb,  MSECS(MIN_SLEEP_TIME));
		return;
	}	

	if(((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME)))||
			((tl < rb) && (tl>MSECS(69)) && ((rb-tl) > MSECS(MAX_SLEEP_TIME)))||
			((tl<rb)&&(tl<MSECS(69))&&((tl+0xffffffff-rb)>MSECS(MAX_SLEEP_TIME)))) {
		printk("========>too long to sleep:%x, %x, %lx\n", tl, rb,  MSECS(MAX_SLEEP_TIME));
		spin_unlock_irqrestore(&priv->ps_lock,flags);
		return;
	}
	{
		u32 tmp = (tl>rb)?(tl-rb):(rb-tl);
		queue_delayed_work_rsl(priv->rtllib->wq,
				&priv->rtllib->hw_wakeup_wq,tmp); 
	}
	queue_delayed_work_rsl(priv->rtllib->wq, 
			(void *)&priv->rtllib->hw_sleep_wq,0);
	spin_unlock_irqrestore(&priv->ps_lock,flags);
#endif
}
Example #20
void ath9k_htc_ani_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv =
		container_of(work, struct ath9k_htc_priv, ani_work.work);
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >=
		    short_cal_interval) {
			shortcal = true;
			ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if (ah->config.enable_ani &&
	    (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {

		ath9k_htc_ps_wakeup(priv);

		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, ah->curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal)
			common->ani.caldone =
				ath9k_hw_calibrate(ah, ah->curchan,
						   ah->rxchainmask, longcal);

		ath9k_htc_ps_restore(priv);
	}

set_timer:
	/*
	* Set timer interval based on previous results.
	* The interval must be the shortest necessary to satisfy ANI,
	* short calibration and long calibration.
	*/
	cal_interval = ATH_LONG_CALINTERVAL;
	if (ah->config.enable_ani)
		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
				     msecs_to_jiffies(cal_interval));
}
Example #21
static irqreturn_t pmc_wakeup_isr(int this_irq, void *dev_id)
{
	unsigned int status;
	unsigned long flags;

	status = PMWS_VAL;          /* Copy the wakeup status */
	udelay(100);
	PMWS_VAL = status;
	//printk("pmc_wakeup_isr %x\n",status);

	if(status & BIT1){
		viatelcom_irq_cp_wake_ap(this_irq,dev_id);
		PMWS_VAL |= BIT1;
		
	}
	
#ifdef KEYPAD_POWER_SUPPORT
	if((status & BIT14) && kpadPower_dev) {

		spin_lock_irqsave(&kpadPower_lock, flags);
		if(!powerKey_is_pressed) {
			powerKey_is_pressed = 1; 
			input_report_key(kpadPower_dev, KEY_POWER, 1); //power key is pressed
			input_sync(kpadPower_dev);
			pressed_jiffies = jiffies;
			wmt_pwrbtn_debounce_value(power_up_debounce_value);
			DPRINTK("\n[%s]power key pressed -->\n",__func__);
			time1 = jiffies_to_msecs(jiffies);
		} else {
			input_event(kpadPower_dev, EV_KEY, KEY_POWER, 2); // power key repeat
			input_sync(kpadPower_dev);
			DPRINTK("\n[%s]power key repeat\n",__func__);

		}
		//disable_irq(IRQ_PMC_WAKEUP);
		spin_unlock_irqrestore(&kpadPower_lock, flags);
		mod_timer(&kpadPower_timer, jiffies + power_button_timeout);
	}
#endif


    
	if (status & BIT14) {       /* Power button wake up */
#if defined(SOFT_POWER_SUPPORT) && defined(CONFIG_PROC_FS)
		softpower_data = 1;
#endif
		/*schedule_work(&PMC_shutdown);*/
	}
	
	if (status & (1 << WKS_UHC)) {       /* UHC wake up */
		PMWE_VAL &= ~(1 << WKS_UHC);
	}

#ifdef RTC_WAKEUP_SUPPORT
	if (status & BIT15)        /* Check RTC wakeup status bit */
		PMWS_VAL |= BIT15;
#endif

#ifdef MOUSE_WAKEUP_SUPPORT
	if(status & BIT11)
		PMWS_VAL |= BIT11;
#endif

#ifdef KB_WAKEUP_SUPPORT
	if(status & BIT11)
		PMWS_VAL |= BIT10;
#endif

	return IRQ_HANDLED;
}
Example #22
void omap2_pm_dump(int mode, int resume, unsigned int us)
{
	struct reg {
		const char *name;
		u32 val;
	} regs[32];
	int reg_count = 0, i;
	const char *s1 = NULL, *s2 = NULL;

	if (!resume) {
#if 0
		/* MPU */
		DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRM_IRQENABLE_MPU_OFFSET);
		DUMP_CM_MOD_REG(MPU_MOD, OMAP2_CM_CLKSTCTRL);
		DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTCTRL);
		DUMP_PRM_MOD_REG(MPU_MOD, OMAP2_PM_PWSTST);
		DUMP_PRM_MOD_REG(MPU_MOD, PM_WKDEP);
#endif
#if 0
		/* INTC */
		DUMP_INTC_REG(INTC_MIR0, 0x0084);
		DUMP_INTC_REG(INTC_MIR1, 0x00a4);
		DUMP_INTC_REG(INTC_MIR2, 0x00c4);
#endif
#if 0
		DUMP_CM_MOD_REG(CORE_MOD, CM_FCLKEN1);
		if (cpu_is_omap24xx()) {
			DUMP_CM_MOD_REG(CORE_MOD, OMAP24XX_CM_FCLKEN2);
			DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
					OMAP2_PRCM_CLKEMUL_CTRL_OFFSET);
			DUMP_PRM_MOD_REG(OMAP24XX_GR_MOD,
					OMAP2_PRCM_CLKSRC_CTRL_OFFSET);
		}
		DUMP_CM_MOD_REG(WKUP_MOD, CM_FCLKEN);
		DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN1);
		DUMP_CM_MOD_REG(CORE_MOD, CM_ICLKEN2);
		DUMP_CM_MOD_REG(WKUP_MOD, CM_ICLKEN);
		DUMP_CM_MOD_REG(PLL_MOD, CM_CLKEN);
		DUMP_CM_MOD_REG(PLL_MOD, CM_AUTOIDLE);
		DUMP_PRM_MOD_REG(CORE_MOD, OMAP2_PM_PWSTST);
#endif
#if 0
		/* DSP */
		if (cpu_is_omap24xx()) {
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_FCLKEN);
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_ICLKEN);
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_IDLEST);
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_AUTOIDLE);
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, CM_CLKSEL);
			DUMP_CM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_CM_CLKSTCTRL);
			DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTCTRL);
			DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_RM_RSTST);
			DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTCTRL);
			DUMP_PRM_MOD_REG(OMAP24XX_DSP_MOD, OMAP2_PM_PWSTST);
		}
#endif
	} else {
		DUMP_PRM_MOD_REG(CORE_MOD, PM_WKST1);
		if (cpu_is_omap24xx())
			DUMP_PRM_MOD_REG(CORE_MOD, OMAP24XX_PM_WKST2);
		DUMP_PRM_MOD_REG(WKUP_MOD, PM_WKST);
		DUMP_PRM_MOD_REG(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
#if 1
		DUMP_INTC_REG(INTC_PENDING_IRQ0, 0x0098);
		DUMP_INTC_REG(INTC_PENDING_IRQ1, 0x00b8);
		DUMP_INTC_REG(INTC_PENDING_IRQ2, 0x00d8);
#endif
	}

	switch (mode) {
	case 0:
		s1 = "full";
		s2 = "retention";
		break;
	case 1:
		s1 = "MPU";
		s2 = "retention";
		break;
	case 2:
		s1 = "MPU";
		s2 = "idle";
		break;
	}

	if (!resume)
#ifdef CONFIG_NO_HZ
		printk(KERN_INFO
		       "--- Going to %s %s (next timer after %u ms)\n", s1, s2,
		       jiffies_to_msecs(get_next_timer_interrupt(jiffies) -
					jiffies));
#else
		printk(KERN_INFO "--- Going to %s %s\n", s1, s2);
#endif
	else
Example #23
static ssize_t usbdev_trig_name_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct usbdev_trig_data *td = led_cdev->trigger_data;

	if (size < 0 || size >= DEV_BUS_ID_SIZE)
		return -EINVAL;

	write_lock(&td->lock);

	strcpy(td->device_name, buf);
	if (size > 0 && td->device_name[size - 1] == '\n')
		td->device_name[size - 1] = 0;

	if (td->device_name[0] != 0) {
		struct usbdev_trig_match match = {
			.device_name = td->device_name,
		};

		/* check for existing device to update from */
		usb_for_each_dev(&match, usbdev_trig_find_usb_dev);
		if (match.usb_dev) {
			if (td->usb_dev)
				usb_put_dev(td->usb_dev);

			td->usb_dev = match.usb_dev;
			td->last_urbnum = atomic_read(&match.usb_dev->urbnum);
		}

		/* updates LEDs, may start timers */
		usbdev_trig_update_state(td);
	}

	write_unlock(&td->lock);
	return size;
}

static DEVICE_ATTR(device_name, 0644, usbdev_trig_name_show,
		   usbdev_trig_name_store);

static ssize_t usbdev_trig_interval_show(struct device *dev,
				 	 struct device_attribute *attr,
					 char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct usbdev_trig_data *td = led_cdev->trigger_data;

	read_lock(&td->lock);
	sprintf(buf, "%u\n", jiffies_to_msecs(td->interval));
	read_unlock(&td->lock);

	return strlen(buf) + 1;
}

static ssize_t usbdev_trig_interval_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t size)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct usbdev_trig_data *td = led_cdev->trigger_data;
	int ret = -EINVAL;
	char *after;
	unsigned long value = simple_strtoul(buf, &after, 10);
	size_t count = after - buf;

	if (*after && isspace(*after))
		count++;

	if (count == size && value <= 10000) {
		write_lock(&td->lock);
		td->interval = msecs_to_jiffies(value);
		usbdev_trig_update_state(td); /* resets timer */
		write_unlock(&td->lock);
		ret = count;
	}

	return ret;
}

static DEVICE_ATTR(activity_interval, 0644, usbdev_trig_interval_show,
		   usbdev_trig_interval_store);

static int usbdev_trig_notify(struct notifier_block *nb,
			      unsigned long evt,
			      void *data)
{
	struct usb_device *usb_dev;
	struct usbdev_trig_data *td;

	if (evt != USB_DEVICE_ADD && evt != USB_DEVICE_REMOVE)
		return NOTIFY_DONE;

	usb_dev = data;
	td = container_of(nb, struct usbdev_trig_data, notifier);

	write_lock(&td->lock);

	if (strcmp(dev_name(&usb_dev->dev), td->device_name))
		goto done;

	if (evt == USB_DEVICE_ADD) {
		usb_get_dev(usb_dev);
		if (td->usb_dev != NULL)
			usb_put_dev(td->usb_dev);
		td->usb_dev = usb_dev;
		td->last_urbnum = atomic_read(&usb_dev->urbnum);
	} else if (evt == USB_DEVICE_REMOVE) {
		if (td->usb_dev != NULL) {
			usb_put_dev(td->usb_dev);
			td->usb_dev = NULL;
		}
	}

	usbdev_trig_update_state(td);

done:
	write_unlock(&td->lock);
	return NOTIFY_DONE;
}

/* here's the real work! */
static void usbdev_trig_timer(unsigned long arg)
{
	struct usbdev_trig_data *td = (struct usbdev_trig_data *)arg;
	int new_urbnum;

	write_lock(&td->lock);

	if (!td->usb_dev || td->interval == 0) {
		/*
		 * we don't need to do timer work, just reflect device presence
		 */
		if (td->usb_dev)
			led_set_brightness(td->led_cdev, LED_FULL);
		else
			led_set_brightness(td->led_cdev, LED_OFF);

		goto no_restart;
	}

	if (td->interval)
		new_urbnum = atomic_read(&td->usb_dev->urbnum);
	else
		new_urbnum = 0;

	if (td->usb_dev) {
		/*
		 * Base state is ON (device is present). If there's no device,
		 * we don't get this far and the LED is off.
		 * OFF -> ON always
		 * ON -> OFF on activity
		 */
		if (td->led_cdev->brightness == LED_OFF)
			led_set_brightness(td->led_cdev, LED_FULL);
		else if (td->last_urbnum != new_urbnum)
			led_set_brightness(td->led_cdev, LED_OFF);
	} else {
		/*
		 * base state is OFF
		 * ON -> OFF always
		 * OFF -> ON on activity
		 */
		if (td->led_cdev->brightness == LED_FULL)
			led_set_brightness(td->led_cdev, LED_OFF);
		else if (td->last_urbnum != new_urbnum)
			led_set_brightness(td->led_cdev, LED_FULL);
	}

	td->last_urbnum = new_urbnum;
	mod_timer(&td->timer, jiffies + td->interval);

no_restart:
	write_unlock(&td->lock);
}

static void usbdev_trig_activate(struct led_classdev *led_cdev)
{
	struct usbdev_trig_data *td;
	int rc;

	td = kzalloc(sizeof(struct usbdev_trig_data), GFP_KERNEL);
	if (!td)
		return;

	rwlock_init(&td->lock);

	td->notifier.notifier_call = usbdev_trig_notify;
	td->notifier.priority = 10;

	setup_timer(&td->timer, usbdev_trig_timer, (unsigned long) td);

	td->led_cdev = led_cdev;
	td->interval = msecs_to_jiffies(50);

	led_cdev->trigger_data = td;

	rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
	if (rc)
		goto err_out;

	rc = device_create_file(led_cdev->dev, &dev_attr_activity_interval);
	if (rc)
		goto err_out_device_name;

	usb_register_notify(&td->notifier);
	return;

err_out_device_name:
	device_remove_file(led_cdev->dev, &dev_attr_device_name);
err_out:
	led_cdev->trigger_data = NULL;
	kfree(td);
}

static void usbdev_trig_deactivate(struct led_classdev *led_cdev)
{
	struct usbdev_trig_data *td = led_cdev->trigger_data;

	if (td) {
		usb_unregister_notify(&td->notifier);

		device_remove_file(led_cdev->dev, &dev_attr_device_name);
		device_remove_file(led_cdev->dev, &dev_attr_activity_interval);

		write_lock(&td->lock);

		if (td->usb_dev) {
			usb_put_dev(td->usb_dev);
			td->usb_dev = NULL;
		}

		write_unlock(&td->lock);

		del_timer_sync(&td->timer);

		kfree(td);
	}
}

static struct led_trigger usbdev_led_trigger = {
	.name		= "usbdev",
	.activate	= usbdev_trig_activate,
	.deactivate	= usbdev_trig_deactivate,
};

static int __init usbdev_trig_init(void)
{
	return led_trigger_register(&usbdev_led_trigger);
}

static void __exit usbdev_trig_exit(void)
{
	led_trigger_unregister(&usbdev_led_trigger);
}

module_init(usbdev_trig_init);
module_exit(usbdev_trig_exit);

MODULE_AUTHOR("Gabor Juhos <*****@*****.**>");
MODULE_DESCRIPTION("USB device LED trigger");
MODULE_LICENSE("GPL v2");
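The trigger above is configured entirely through the sysfs attributes it creates. Below is a minimal userspace sketch of that configuration; the LED name ("myled") and the USB device name ("1-1") are assumptions, while the trigger name "usbdev" and the device_name/activity_interval attributes come from the code above.

/* Hypothetical userspace helper: bind an LED to the "usbdev" trigger.
 * All sysfs paths below assume an LED class device called "myled". */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *led = "/sys/class/leds/myled";	/* assumed LED name */
	char path[128];

	snprintf(path, sizeof(path), "%s/trigger", led);
	write_sysfs(path, "usbdev");

	snprintf(path, sizeof(path), "%s/device_name", led);
	write_sysfs(path, "1-1");	/* assumed USB device name */

	snprintf(path, sizeof(path), "%s/activity_interval", led);
	write_sysfs(path, "50");	/* ms; the store handler accepts 0..10000 */

	return 0;
}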
static void fslepdc_update_area(update_area_t *update_area)
{
    int is_MU = 0;

    if ( einkfb_power_level_on == fslepdc_power_level )
    {
        fx_type update_mode = update_area->which_fx;
        u8 *data = update_area->buffer;

        if ( fx_display_sync == update_mode )
        {
            fslepdc_sync();
            data = NULL;
        }
    
        if ( data || UPDATE_MODE_BUFFER_DISPLAY(update_mode) )
        {
            bool    skip_buffer_display = false,
                    skip_buffer_load    = false,
                    flashing_update     = false, 
                    area_update         = false;
            u32     waveform_mode       = fslepdc_get_waveform_mode(WF_UPD_MODE_GC);
            
            struct mxcfb_update_data update_data;
            struct mxcfb_rect dirty_rect;
            struct einkfb_info info;

	    cancel_rearming_delayed_work(&fslepdc_repair_work);
            
            fslepdc_set_ld_img_start = jiffies;
            
            fsledpc_init_update_data(&update_data);
            einkfb_get_info(&info);
        
            update_data.update_region.left   = update_area->x1;
            update_data.update_region.top    = update_area->y1;
            update_data.update_region.width  = update_area->x2 - update_area->x1;
            update_data.update_region.height = update_area->y2 - update_area->y1;
            
            if ( (info.xres == update_data.update_region.width) && (info.yres == update_data.update_region.height) )
                area_update = false;
            else
                area_update = true;
            
            switch ( update_mode )
            {
                // Just load up the hardware's buffer; don't display it.
                //
                case fx_buffer_load:
                    skip_buffer_display = true;
                break;
        
                // Just display what's already in the hardware buffer; don't reload it.
                //
                case fx_buffer_display_partial:
                case fx_buffer_display_full:
                    skip_buffer_load = true;
                goto set_update_mode;
        
                // Regardless of what gets put into the hardware's buffer,
                // only update the black and white pixels.
                //
                case fx_update_fast:
                  waveform_mode = fslepdc_get_waveform_mode(WF_UPD_MODE_PU);
                  is_MU = is_MU_skip;
                goto set_update_mode;
        
                // Regardless of what gets put into the hardware's buffer,
                // use white transition update.
                //
                case fx_update_white_trans:
                  waveform_mode = fslepdc_get_waveform_mode(WF_UPD_MODE_GLF);
                  is_MU = is_MU_skip;
                goto set_update_mode;
                    
                // Regardless of what gets put into the hardware's buffer,
                // refresh all pixels as cleanly as possible.
                //
                case fx_update_slow:
                  waveform_mode = fslepdc_get_waveform_mode(WF_UPD_MODE_GC);
                  is_MU = is_MU_skip;
                /* goto set_update_mode; */
                    
                set_update_mode:
                default:
                    
                    // Normalize to either flashing or non-flashing.
                    //
                    update_mode     = area_update     ? UPDATE_AREA_MODE(update_mode)
                                                      : UPDATE_MODE(update_mode);
                    
                    // Simplify that.
                    //
                    flashing_update = UPDATE_FULL(update_mode);

                    // Don't use DU/MU for flashing updates
                    is_MU = flashing_update ? is_MU_skip : is_MU;

                    // Convert to the MXC EPDC's scheme.
                    //
                    update_mode     = flashing_update ? UPDATE_MODE_FULL
                                                      : UPDATE_MODE_PARTIAL;
                break;
            }
    
            // Process and load the image data if we should.
            //
            fslepdc_ld_img_start = jiffies;
           
	    if (!skip_buffer_load)
	    {
                // Check to see whether we can force an MU or not.
                //
                if (mxc_epdc_blit_to_fb(data, &update_data.update_region, &dirty_rect))
                {
                        // In the fx_update_fast & fx_update_slow cases, we want very
                        // specific waveform modes.  So, in those cases, we don't
                        // want to force an MU, even if we can.
                        //
                        if (0 == is_MU)
                                is_MU = 1;

                }
            }

            // Skip the forced MU when we should.
            //
            if (is_MU_skip == is_MU)
                is_MU = 0;

            // Update the display in the specified way if we should.
            //
            fslepdc_upd_data_start = jiffies;

            if ( !skip_buffer_display )
            {
                // If this is a flashing or a full-screen update, wait until
                // the last update completes before sending a new one.
                //
                if ( flashing_update || !area_update ) {
			if (fslepdc_repair_x1 && fslepdc_repair_y1 && 
				((fslepdc_repair_y2 - fslepdc_repair_y1) <= 300) &&
				((fslepdc_repair_x2 - fslepdc_repair_x1) <= 300) )
			{
				/* Do Nothing */
			}
			else {
				if ( (fslepdc_var.xres == (fslepdc_repair_x2 - fslepdc_repair_x1)) &&
					(fslepdc_var.yres == (fslepdc_repair_y2 - fslepdc_repair_y1)) ){
						/* Do Nothing */
				}
				else 
					fslepdc_sync();
			}
		}
                
                // Set up to perform the specified update type, marking it pseudo-uniquely.  Also,
                // if we're using the built-in waveform, which is 25C-only, don't even bother
                // using the cached temperature since it won't be used anyway.
                //
    
                update_data.update_mode   = update_mode;
                update_data.temp          = fslepdc_using_builtin_waveform() ? TEMP_USE_AMBIENT
                                                                             : fslepdc_read_temperature();

                // Send the update itself.
                //
		if (is_MU)
			update_data.waveform_mode = WF_UPD_MODE_MU;
		else
			update_data.waveform_mode = waveform_mode;

		fslepdc_last_update_marker++;
		update_data.update_marker = fslepdc_last_update_marker;
			
		/* repair logic */
		if (!fslepdc_repair_count ) {
			fslepdc_repair_x1 = update_data.update_region.left;
			fslepdc_repair_y1 = update_data.update_region.top;
			fslepdc_repair_x2 = fslepdc_repair_x1 + update_data.update_region.width;
			fslepdc_repair_y2 = fslepdc_repair_y1 + update_data.update_region.height;

			fslepdc_repair_count++;
		}
		else {
			fslepdc_repair_x1 = min(update_data.update_region.left, fslepdc_repair_x1);
			fslepdc_repair_y1 = min(update_data.update_region.top, fslepdc_repair_y1);
			fslepdc_repair_x2 = max((update_data.update_region.left +
					update_data.update_region.width), fslepdc_repair_x2);

			fslepdc_repair_y2 = max((update_data.update_region.top +
					update_data.update_region.height), fslepdc_repair_y2);
			fslepdc_repair_count++;
		}

		fslepdc_send_update(&update_data, FSLEPDC_UPDATE_NEW);
		if (UPDATE_MODE_FULL == update_data.update_mode) {
			fslepdc_repair_count = 0;
		}
		else {
			schedule_delayed_work(&fslepdc_repair_work, msecs_to_jiffies(1000));
		}
	}

	fslepdc_image_stop_time = jiffies;
    
	fslepdc_image_start_time = jiffies_to_msecs(fslepdc_set_ld_img_start - info.jif_on);
	fslepdc_image_processing_time = jiffies_to_msecs(fslepdc_ld_img_start - fslepdc_set_ld_img_start);
	fslepdc_image_loading_time = jiffies_to_msecs(fslepdc_upd_data_start - fslepdc_ld_img_start);
	fslepdc_image_display_time = jiffies_to_msecs(fslepdc_image_stop_time  - fslepdc_upd_data_start);
	fslepdc_image_stop_time = jiffies_to_msecs(fslepdc_image_stop_time  - info.jif_on);
        }
    }
}
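As a rough illustration of how the routine above might be driven, here is a hedged caller sketch; the helper name is hypothetical, while the update_area_t fields, fx_update_slow and fslepdc_var are taken from the surrounding code.

/* Hypothetical caller: request a slow (clean, all-pixel) refresh of the
 * whole panel from an already-prepared image buffer. */
static void fslepdc_example_full_refresh(u8 *image)
{
    update_area_t area = {
        .x1       = 0,
        .y1       = 0,
        .x2       = fslepdc_var.xres,
        .y2       = fslepdc_var.yres,
        .which_fx = fx_update_slow,
        .buffer   = image,
    };

    fslepdc_update_area(&area);
}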
int mipi_dsi_on(struct platform_device *pdev)
{
	int ret = 0;
	struct balong_fb_data_type *balongfd = NULL;
	unsigned long timeout = jiffies;
	if (NULL == pdev) {
		balongfb_loge("NULL Pointer\n");
		return -EINVAL;
	}

	balongfd = (struct balong_fb_data_type *)platform_get_drvdata(pdev);
	if (NULL == balongfd) {
		balongfb_loge("NULL Pointer\n");
		return -EINVAL;
	}

	/* set LCD init step before LCD on*/
	balongfd->panel_info.lcd_init_step = LCD_INIT_POWER_ON;
	ret = panel_next_on(pdev);

	/* mipi dphy clock enable */
	ret = clk_prepare_enable(balongfd->dsi_cfg_clk);
	if (ret != 0) {
		balongfb_loge("failed to enable dsi_cfg_clk, error=%d!\n", ret);
		return ret;
	}

	/* dsi pixel on */
	set_reg(balongfd->ade_base + LDI_HDMI_DSI_GT_REG, 0x0, 1, 0);
	/* mipi init */
	mipi_init(balongfd);

	/* modified for b052 bbit begin */
	/* switch to command mode */
	set_MIPIDSI_MODE_CFG(MIPIDSI_COMMAND_MODE);
	set_MIPIDSI_CMD_MODE_CFG_all_en_flag(1);

	/* do not request HS clock transmission on the clock lane */
	set_MIPIDSI_LPCLK_CTRL_phy_txrequestclkhs(0);
	/* add for timeout print log */
	balongfb_loge("%s: dsi_on_time = %u,curfreq = %d\n",
			__func__,jiffies_to_msecs(jiffies-timeout),cpufreq_get_fb(0));
	timeout = jiffies;
	ret = panel_next_on(pdev);
	/* modified for b052 bbit begin */
	/* add for timeout print log */
	balongfb_loge("%s: panel_on_time = %u,curfreq = %d\n",
			__func__,jiffies_to_msecs(jiffies-timeout),cpufreq_get_fb(0));
	/* reset Core */
	set_MIPIDSI_PWR_UP_shutdownz(0);

	if (balongfd->panel_info.type == PANEL_MIPI_VIDEO) {
		/* switch to video mode */
		set_MIPIDSI_MODE_CFG(MIPIDSI_VIDEO_MODE);

#if ADE_DEBUG_LOG_ENABLE
		/* set to video lcd mode */
		g_panel_lcd_mode = 0;
#endif
	}

	if (balongfd->panel_info.type == PANEL_MIPI_CMD) {
		/* switch to cmd mode */
		set_MIPIDSI_CMD_MODE_CFG_all_en_flag(0);

#if ADE_DEBUG_LOG_ENABLE
		/* set to command lcd mode */
		g_panel_lcd_mode = 1;
#endif
	}

	/* enable generate High Speed clock */
	set_MIPIDSI_LPCLK_CTRL_phy_txrequestclkhs(1);
	/* Waking up Core */
	set_MIPIDSI_PWR_UP_shutdownz(1);
	/*set max packet size, 0x1 << 8 |0x37*/
	set_MIPIDSI_GEN_HDR(NULL, 0x137);
	lcd_pwr_status.lcd_dcm_pwr_status |= BIT(1);
	do_gettimeofday(&lcd_pwr_status.tvl_lcd_on);
	time_to_tm(lcd_pwr_status.tvl_lcd_on.tv_sec, 0, &lcd_pwr_status.tm_lcd_on);
	return ret;
}
Example #26
uint32_t dnx_timestamp(void)
{
  return jiffies_to_msecs(jiffies);
}
Example #27
static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct RioCommand rio_cmd;
	struct rio_usb_data *rio = &rio_instance;
	void __user *data;
	unsigned char *buffer;
	int result, requesttype;
	int retries;
	int retval=0;

	mutex_lock(&rio500_mutex);
	/* Sanity check to make sure rio is connected, powered, etc */
	if (rio->present == 0 || rio->rio_dev == NULL) {
		retval = -ENODEV;
		goto err_out;
	}

	switch (cmd) {
	case RIO_RECV_COMMAND:
		data = (void __user *) arg;
		if (data == NULL)
			break;
		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
			retval = -EFAULT;
			goto err_out;
		}
		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
			retval = -EINVAL;
			goto err_out;
		}
		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
		if (buffer == NULL) {
			retval = -ENOMEM;
			goto err_out;
		}
		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
			retval = -EFAULT;
			free_page((unsigned long) buffer);
			goto err_out;
		}

		requesttype = rio_cmd.requesttype | USB_DIR_IN |
		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		dev_dbg(&rio->rio_dev->dev,
			"sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
			requesttype, rio_cmd.request, rio_cmd.value,
			rio_cmd.index, rio_cmd.length);
		/* Send rio control message */
		retries = 3;
		while (retries) {
			result = usb_control_msg(rio->rio_dev,
						 usb_rcvctrlpipe(rio->rio_dev, 0),
						 rio_cmd.request,
						 requesttype,
						 rio_cmd.value,
						 rio_cmd.index, buffer,
						 rio_cmd.length,
						 jiffies_to_msecs(rio_cmd.timeout));
			if (result == -ETIMEDOUT)
				retries--;
			else if (result < 0) {
				dev_err(&rio->rio_dev->dev,
					"Error executing ioctrl. code = %d\n",
					result);
				retries = 0;
			} else {
				dev_dbg(&rio->rio_dev->dev,
					"Executed ioctl. Result = %d (data=%02x)\n",
					result, buffer[0]);
				if (copy_to_user(rio_cmd.buffer, buffer,
						 rio_cmd.length)) {
					free_page((unsigned long) buffer);
					retval = -EFAULT;
					goto err_out;
				}
				retries = 0;
			}

			/* rio_cmd.buffer contains a raw stream of single byte
			   data which has been returned from rio.  Data is
			   interpreted at application level.  For data that
			   will be cast to data types longer than 1 byte, data
			   will be little_endian and will potentially need to
			   be swapped at the app level */

		}
		free_page((unsigned long) buffer);
		break;

	case RIO_SEND_COMMAND:
		data = (void __user *) arg;
		if (data == NULL)
			break;
		if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
			retval = -EFAULT;
			goto err_out;
		}
		if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
			retval = -EINVAL;
			goto err_out;
		}
		buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
		if (buffer == NULL) {
			retval = -ENOMEM;
			goto err_out;
		}
		if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
			free_page((unsigned long)buffer);
			retval = -EFAULT;
			goto err_out;
		}

		requesttype = rio_cmd.requesttype | USB_DIR_OUT |
		    USB_TYPE_VENDOR | USB_RECIP_DEVICE;
		dev_dbg(&rio->rio_dev->dev,
			"sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
			requesttype, rio_cmd.request, rio_cmd.value,
			rio_cmd.index, rio_cmd.length);
		/* Send rio control message */
		retries = 3;
		while (retries) {
			result = usb_control_msg(rio->rio_dev,
						 usb_sndctrlpipe(rio->rio_dev, 0),
						 rio_cmd.request,
						 requesttype,
						 rio_cmd.value,
						 rio_cmd.index, buffer,
						 rio_cmd.length,
						 jiffies_to_msecs(rio_cmd.timeout));
			if (result == -ETIMEDOUT)
				retries--;
			else if (result < 0) {
				dev_err(&rio->rio_dev->dev,
					"Error executing ioctrl. code = %d\n",
					result);
				retries = 0;
			} else {
				dev_dbg(&rio->rio_dev->dev,
					"Executed ioctl. Result = %d\n", result);
				retries = 0;

			}

		}
		free_page((unsigned long) buffer);
		break;

	default:
		retval = -ENOTTY;
		break;
	}


err_out:
	mutex_unlock(&rio500_mutex);
	return retval;
}
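For completeness, a hedged userspace sketch of exercising the ioctl above. The device node path, the header name and the vendor request value are assumptions; the struct RioCommand fields and RIO_SEND_COMMAND come from the driver code itself.

/* Hypothetical userspace caller for RIO_SEND_COMMAND. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "rio500_usb.h"		/* assumed header providing struct RioCommand */

int rio_example_send(void)
{
	unsigned char payload[4] = { 0 };
	struct RioCommand cmd;
	int fd, ret;

	fd = open("/dev/usb/rio500", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request     = 0x4e;	/* illustrative vendor request only */
	cmd.requesttype = 0;	/* USB_DIR_OUT/USB_TYPE_VENDOR are OR-ed in by the driver */
	cmd.value       = 0;
	cmd.index       = 0;
	cmd.buffer      = payload;
	cmd.length      = sizeof(payload);
	cmd.timeout     = 1000;	/* the driver converts this with jiffies_to_msecs() */

	ret = ioctl(fd, RIO_SEND_COMMAND, &cmd);
	close(fd);
	return ret;
}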
Example #28
/*****************************************************************************
Description    : Set the frequency control as req_lock_level indicates and start or refresh
                 the 1s frequency-control release timer. This function runs in the
                 frequency-control work queue.
Prototype      : void k3v2_do_freq_lock_work(struct work_struct *work)
Input  Param   :
Output  Param  :
Return  Value  :
******************************************************************************/
static void k3v2_do_freq_lock_work(struct work_struct *work)
{
    unsigned char req_lock_level = 0;
    freq_lock_control_t *fl_control_ptr =
        container_of(work, freq_lock_control_t, do_freq_lock_work);
#ifdef DEBUG_WIFI_FREQ_LOCK
    unsigned long temp_jiffies;
    unsigned long  freq_cfg_duration;
#endif
    
    if (NULL == fl_control_ptr) {
        printk(KERN_ERR "k3v2_do_freq_lock_work: NULL pointer error!\n");
        return;
    }
    
    mutex_lock(&fl_control_ptr->lock_freq_mtx);
    req_lock_level = fl_control_ptr->req_lock_level;
    if ((req_lock_level > MAX_SPEED_FREQ_LEVEL) ||
        (0 == req_lock_level)) {
        printk(KERN_ERR "k3v2_do_freq_lock_work: invalid lock_level error!\n");
        mutex_unlock(&fl_control_ptr->lock_freq_mtx);
        return;
    }

    if(LR_START_MAGIC == fl_control_ptr->release_work_state){
        fl_control_ptr->release_work_state = LR_SHOULD_DROP_MAGIC;
        /*disable the pending release lock work in queue here.*/
        lfprintf("WARM: k3v2_do_freq_lock_work set magic for delete pending release lock work.\n");
    }
    
    if(FREQ_LOCK_ENABLE == fl_control_ptr->lock_mod){       
        if (req_lock_level != fl_control_ptr->lock_level ){
#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG
            /* Frequency lock level has been changed. Do new level's frequency lock here.*/
            pm_qos_update_request(&fl_control_ptr->cpu_qos_request, 
                                        speed_freq_level[req_lock_level-1].min_cpu_freq);

            if(fl_control_ptr->lock_level < START_DDR_FREQ_LOCK_LEVEL){
                /*Old DDR frequency lock is not open*/
                if(req_lock_level >= START_DDR_FREQ_LOCK_LEVEL){
                    /* need open DDR frequency lock now.*/
                    pm_qos_add_request(&fl_control_ptr->ddr_qos_request, PM_QOS_DDR_MIN_PROFILE, 
                        speed_freq_level[req_lock_level-1].min_ddr_freq);
                }
                /*If old and new lock level all smaller than START_DDR_FREQ_LOCK_LEVEL, 
                *  not need request the DDR frequency lock still.*/
            }else{
                /*Old DDR frequency lock is open*/
                if(req_lock_level >= START_DDR_FREQ_LOCK_LEVEL){
                    /*New DDR frequency lock need open also,just update it.*/
                    pm_qos_update_request(&fl_control_ptr->ddr_qos_request, 
                            speed_freq_level[req_lock_level-1].min_ddr_freq);
                }else{
                    /*Old DDR frequency lock is open, new lock level not need it to be open, 
                    *  release the DDR frequency lock .*/
                    pm_qos_remove_request(&fl_control_ptr->ddr_qos_request);
                }
            }
            fl_control_ptr->lock_level = req_lock_level;
#endif  /*end of  #ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG*/

#ifdef DEBUG_WIFI_FREQ_LOCK
            temp_jiffies = jiffies;
            freq_cfg_duration = jiffies_to_msecs(temp_jiffies - pre_freq_cfg_jiffies);
            pre_freq_cfg_jiffies = temp_jiffies;
            lfprintf("k3v2_do_freq_lock_work enter refresh lock level, with lock_level=%d, interval=%lu.%lu s\n", 
                req_lock_level, freq_cfg_duration/1000, freq_cfg_duration%1000);
#endif
            
        }       
        
    }else{  
#ifdef DEBUG_WIFI_FREQ_LOCK
        temp_jiffies = jiffies;
        freq_cfg_duration = jiffies_to_msecs(temp_jiffies - pre_release_jiffies);
        lfprintf("k3v2_do_freq_lock_work new lock request from last release time: %lu.%lu s \n", 
             freq_cfg_duration/1000, freq_cfg_duration%1000);
#endif  
        /*freq lock is disabled, enable it now*/
        fl_control_ptr->lock_mod = FREQ_LOCK_ENABLE;
        fl_control_ptr->lock_level = req_lock_level;
#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG        
        pm_qos_add_request(&fl_control_ptr->cpu_qos_request, PM_QOS_CPU_MIN_PROFILE, 
            speed_freq_level[req_lock_level-1].min_cpu_freq);
        if(req_lock_level >= START_DDR_FREQ_LOCK_LEVEL){
            pm_qos_add_request(&fl_control_ptr->ddr_qos_request, PM_QOS_DDR_MIN_PROFILE, 
                speed_freq_level[req_lock_level-1].min_ddr_freq);
        }
#endif 
    }
    
    /*start or reset release timer */
    mod_timer(&freq_lock_control_ptr->lock_freq_timer_list,
                  jiffies +
                  msecs_to_jiffies(FREQ_LOCK_TIMEOUT_VAL)); 
    mutex_unlock(&fl_control_ptr->lock_freq_mtx);
}
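For illustration, a hedged sketch of how a caller might hand a lock request to the work handler above; the helper name is hypothetical and the use of schedule_work() is an assumption (the original driver may queue the work on its own workqueue).

/* Hypothetical requester: record the wanted level under the mutex and let
 * k3v2_do_freq_lock_work() apply it and arm the release timer. */
static void k3v2_example_request_freq_lock(unsigned char level)
{
    if ((0 == level) || (level > MAX_SPEED_FREQ_LEVEL))
        return;

    mutex_lock(&freq_lock_control_ptr->lock_freq_mtx);
    freq_lock_control_ptr->req_lock_level = level;
    mutex_unlock(&freq_lock_control_ptr->lock_freq_mtx);

    schedule_work(&freq_lock_control_ptr->do_freq_lock_work);
}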
Example #29
int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
    int cmd_idx;
    int ret;

    BUG_ON(cmd->flags & CMD_ASYNC);

    /* A synchronous command can not have a callback set. */
    BUG_ON(cmd->callback);

    if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
        IWL_ERR(priv,
                "Error sending %s: Already sending a host command\n",
                get_cmd_string(cmd->id));
        ret = -EBUSY;
        goto out;
    }

    set_bit(STATUS_HCMD_ACTIVE, &priv->status);

    cmd_idx = iwl_enqueue_hcmd(priv, cmd);
    if (cmd_idx < 0) {
        ret = cmd_idx;
        IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
                get_cmd_string(cmd->id), ret);
        goto out;
    }

    ret = wait_event_interruptible_timeout(priv->wait_command_queue,
                                           !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
                                           HOST_COMPLETE_TIMEOUT);
    if (!ret) {
        if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
            IWL_ERR(priv,
                    "Error sending %s: time out after %dms.\n",
                    get_cmd_string(cmd->id),
                    jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

            clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
            ret = -ETIMEDOUT;
            goto cancel;
        }
    }

    if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
        IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
                get_cmd_string(cmd->id));
        ret = -ECANCELED;
        goto fail;
    }
    if (test_bit(STATUS_FW_ERROR, &priv->status)) {
        IWL_ERR(priv, "Command %s failed: FW Error\n",
                get_cmd_string(cmd->id));
        ret = -EIO;
        goto fail;
    }
    if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
        IWL_ERR(priv, "Error: Response NULL in '%s'\n",
                get_cmd_string(cmd->id));
        ret = -EIO;
        goto cancel;
    }

    ret = 0;
    goto out;

cancel:
    if (cmd->flags & CMD_WANT_SKB) {
        /*
         * Cancel the CMD_WANT_SKB flag for the cmd in the
         * TX cmd queue. Otherwise in case the cmd comes
         * in later, it will possibly set an invalid
         * address (cmd->meta.source).
         */
        priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &=
            ~CMD_WANT_SKB;
    }
fail:
    if (cmd->reply_page) {
        free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
        cmd->reply_page = 0;
    }
out:
    clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
    return ret;
}
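A hedged caller sketch for the synchronous path above; the command ID and the .len/.data payload fields follow the usual iwlwifi host-command layout of this era and are assumptions here, not taken from this snippet.

/* Hypothetical caller: send a zero-length synchronous command and block
 * until the firmware completes it or HOST_COMPLETE_TIMEOUT expires. */
static int iwl_example_send_sync(struct iwl_priv *priv)
{
    struct iwl_host_cmd cmd = {
        .id    = REPLY_ECHO,	/* assumed command ID from the firmware API */
        .flags = 0,		/* must not include CMD_ASYNC on this path */
        .len   = 0,		/* assumed payload fields */
        .data  = NULL,
    };

    return iwl_send_cmd_sync(priv, &cmd);
}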
Example #30
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr  *nlh;
	struct nlattr *attr;
	void *info = NULL;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
				 user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_timer.expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans      = 0;

	r->idiag_state	      = tw->tw_substate;
	r->idiag_timer	      = 3;
	r->idiag_expires      = jiffies_to_msecs(tmo);
	r->idiag_rqueue	      = 0;
	r->idiag_wqueue	      = 0;
	r->idiag_uid	      = 0;
	r->idiag_inode	      = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = inet_reqsk(sk)->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue	= 0;
	r->idiag_wqueue	= 0;
	r->idiag_uid	= 0;
	r->idiag_inode	= 0;

	nlmsg_end(skb, nlh);
	return 0;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
#endif
	else
		goto out_nosk;

	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}
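A small worked illustration of the prefix comparison above (hypothetical helper, values chosen for the example): with bits = 28 only the top 28 bits of each address are compared, so two addresses that differ only in the low nibble still match.

/* Hypothetical illustration: 192.168.1.80 and 192.168.1.95 share a /28
 * prefix, so bitstring_match() returns 1 for bits = 28 but 0 for bits = 32. */
static void bitstring_match_example(void)
{
	__be32 a = htonl(0xc0a80150);	/* 192.168.1.80 */
	__be32 b = htonl(0xc0a8015f);	/* 192.168.1.95 */

	WARN_ON(bitstring_match(&a, &b, 28) != 1);
	WARN_ON(bitstring_match(&a, &b, 32) != 0);
}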